@lssm/lib.evolution 0.0.0-canary-20251217060834 → 0.0.0-canary-20251217072406
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-agent/dist/approval/index.js +1 -0
- package/dist/ai-agent/dist/approval/workflow.js +1 -0
- package/dist/analyzer/spec-analyzer.d.ts +1 -1
- package/dist/analyzer/spec-analyzer.js +305 -1
- package/dist/approval/integration.js +125 -1
- package/dist/generator/ai-spec-generator.js +175 -9
- package/dist/generator/spec-generator.d.ts +1 -1
- package/dist/generator/spec-generator.js +120 -1
- package/dist/index.js +6 -1
- package/dist/lifecycle/dist/index.js +4 -0
- package/dist/lifecycle/dist/types/milestones.js +1 -0
- package/dist/lifecycle/dist/types/signals.js +1 -0
- package/dist/lifecycle/dist/types/stages.js +151 -0
- package/dist/lifecycle/dist/utils/formatters.js +1 -0
- package/dist/observability/dist/index.js +8 -0
- package/dist/observability/dist/intent/aggregator.js +4 -0
- package/dist/observability/dist/intent/detector.js +1 -0
- package/dist/observability/dist/lifecycle/dist/index.js +4 -0
- package/dist/observability/dist/lifecycle/dist/types/milestones.js +1 -0
- package/dist/observability/dist/lifecycle/dist/types/signals.js +1 -0
- package/dist/observability/dist/lifecycle/dist/types/stages.js +142 -0
- package/dist/observability/dist/lifecycle/dist/utils/formatters.js +1 -0
- package/dist/observability/dist/logging/index.js +39 -0
- package/dist/observability/dist/metrics/index.js +22 -0
- package/dist/observability/dist/pipeline/evolution-pipeline.js +3 -0
- package/dist/observability/dist/pipeline/lifecycle-pipeline.js +4 -0
- package/dist/observability/dist/tracing/index.js +1 -0
- package/dist/observability/dist/tracing/middleware.js +2 -0
- package/dist/types.d.ts +1 -1
- package/package.json +8 -8
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import "./workflow.js";
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import { randomUUID } from "node:crypto";
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import { IntentPattern, OperationMetricSample, OptimizationHint, SpecAnomaly, SpecUsageStats } from "../types.js";
|
|
2
|
-
import { Logger } from "@lssm/lib.observability";
|
|
3
2
|
import { LifecycleStage } from "@lssm/lib.lifecycle";
|
|
3
|
+
import { Logger } from "@lssm/lib.observability";
|
|
4
4
|
|
|
5
5
|
//#region src/analyzer/spec-analyzer.d.ts
|
|
6
6
|
interface SpecAnalyzerOptions {
|
|
@@ -1 +1,305 @@
|
|
|
1
|
-
import
|
|
1
|
+
import "../observability/dist/index.js";
|
|
2
|
+
import { LifecycleStage } from "../lifecycle/dist/types/stages.js";
|
|
3
|
+
import "../lifecycle/dist/index.js";
|
|
4
|
+
import { randomUUID } from "node:crypto";
|
|
5
|
+
|
|
6
|
+
//#region src/analyzer/spec-analyzer.ts
|
|
7
|
+
const DEFAULT_OPTIONS = {
|
|
8
|
+
minSampleSize: 50,
|
|
9
|
+
errorRateThreshold: .05,
|
|
10
|
+
latencyP99ThresholdMs: 750
|
|
11
|
+
};
|
|
12
|
+
var SpecAnalyzer = class {
|
|
13
|
+
logger;
|
|
14
|
+
minSampleSize;
|
|
15
|
+
errorRateThreshold;
|
|
16
|
+
latencyP99ThresholdMs;
|
|
17
|
+
throughputDropThreshold;
|
|
18
|
+
constructor(options = {}) {
|
|
19
|
+
this.logger = options.logger;
|
|
20
|
+
this.minSampleSize = options.minSampleSize ?? DEFAULT_OPTIONS.minSampleSize;
|
|
21
|
+
this.errorRateThreshold = options.errorRateThreshold ?? DEFAULT_OPTIONS.errorRateThreshold;
|
|
22
|
+
this.latencyP99ThresholdMs = options.latencyP99ThresholdMs ?? DEFAULT_OPTIONS.latencyP99ThresholdMs;
|
|
23
|
+
this.throughputDropThreshold = options.throughputDropThreshold ?? .2;
|
|
24
|
+
}
|
|
25
|
+
analyzeSpecUsage(samples) {
|
|
26
|
+
if (!samples.length) {
|
|
27
|
+
this.logger?.debug("SpecAnalyzer.analyzeSpecUsage.skip", { reason: "no-samples" });
|
|
28
|
+
return [];
|
|
29
|
+
}
|
|
30
|
+
const groups = /* @__PURE__ */ new Map();
|
|
31
|
+
for (const sample of samples) {
|
|
32
|
+
const key = this.operationKey(sample);
|
|
33
|
+
const arr = groups.get(key) ?? [];
|
|
34
|
+
arr.push(sample);
|
|
35
|
+
groups.set(key, arr);
|
|
36
|
+
}
|
|
37
|
+
return [...groups.values()].filter((samplesForOp) => {
|
|
38
|
+
const valid = samplesForOp.length >= this.minSampleSize;
|
|
39
|
+
if (!valid) this.logger?.debug("SpecAnalyzer.analyzeSpecUsage.skipOperation", {
|
|
40
|
+
operation: this.operationKey(samplesForOp[0]),
|
|
41
|
+
sampleSize: samplesForOp.length,
|
|
42
|
+
minSampleSize: this.minSampleSize
|
|
43
|
+
});
|
|
44
|
+
return valid;
|
|
45
|
+
}).map((operationSamples) => this.buildUsageStats(operationSamples));
|
|
46
|
+
}
|
|
47
|
+
detectAnomalies(stats, baseline) {
|
|
48
|
+
const anomalies = [];
|
|
49
|
+
if (!stats.length) {
|
|
50
|
+
this.logger?.debug("SpecAnalyzer.detectAnomalies.skip", { reason: "no-stats" });
|
|
51
|
+
return anomalies;
|
|
52
|
+
}
|
|
53
|
+
const baselineByOp = new Map((baseline ?? []).map((item) => [this.operationKey(item.operation), item]));
|
|
54
|
+
for (const stat of stats) {
|
|
55
|
+
const evidence = [];
|
|
56
|
+
if (stat.errorRate >= this.errorRateThreshold) {
|
|
57
|
+
evidence.push({
|
|
58
|
+
type: "telemetry",
|
|
59
|
+
description: `Error rate ${stat.errorRate.toFixed(2)} exceeded threshold ${this.errorRateThreshold}`,
|
|
60
|
+
data: { errorRate: stat.errorRate }
|
|
61
|
+
});
|
|
62
|
+
anomalies.push({
|
|
63
|
+
operation: stat.operation,
|
|
64
|
+
severity: this.toSeverity(stat.errorRate / this.errorRateThreshold),
|
|
65
|
+
metric: "error-rate",
|
|
66
|
+
description: "Error rate spike",
|
|
67
|
+
detectedAt: /* @__PURE__ */ new Date(),
|
|
68
|
+
threshold: this.errorRateThreshold,
|
|
69
|
+
observedValue: stat.errorRate,
|
|
70
|
+
evidence
|
|
71
|
+
});
|
|
72
|
+
continue;
|
|
73
|
+
}
|
|
74
|
+
if (stat.p99LatencyMs >= this.latencyP99ThresholdMs) {
|
|
75
|
+
evidence.push({
|
|
76
|
+
type: "telemetry",
|
|
77
|
+
description: `P99 latency ${stat.p99LatencyMs}ms exceeded threshold ${this.latencyP99ThresholdMs}ms`,
|
|
78
|
+
data: { p99LatencyMs: stat.p99LatencyMs }
|
|
79
|
+
});
|
|
80
|
+
anomalies.push({
|
|
81
|
+
operation: stat.operation,
|
|
82
|
+
severity: this.toSeverity(stat.p99LatencyMs / this.latencyP99ThresholdMs),
|
|
83
|
+
metric: "latency",
|
|
84
|
+
description: "Latency regression detected",
|
|
85
|
+
detectedAt: /* @__PURE__ */ new Date(),
|
|
86
|
+
threshold: this.latencyP99ThresholdMs,
|
|
87
|
+
observedValue: stat.p99LatencyMs,
|
|
88
|
+
evidence
|
|
89
|
+
});
|
|
90
|
+
continue;
|
|
91
|
+
}
|
|
92
|
+
const baselineStat = baselineByOp.get(this.operationKey(stat.operation));
|
|
93
|
+
if (baselineStat) {
|
|
94
|
+
const drop = (baselineStat.totalCalls - stat.totalCalls) / baselineStat.totalCalls;
|
|
95
|
+
if (drop >= this.throughputDropThreshold) {
|
|
96
|
+
evidence.push({
|
|
97
|
+
type: "telemetry",
|
|
98
|
+
description: `Throughput dropped by ${(drop * 100).toFixed(1)}% compared to baseline`,
|
|
99
|
+
data: {
|
|
100
|
+
baselineCalls: baselineStat.totalCalls,
|
|
101
|
+
currentCalls: stat.totalCalls
|
|
102
|
+
}
|
|
103
|
+
});
|
|
104
|
+
anomalies.push({
|
|
105
|
+
operation: stat.operation,
|
|
106
|
+
severity: this.toSeverity(drop / this.throughputDropThreshold),
|
|
107
|
+
metric: "throughput",
|
|
108
|
+
description: "Usage drop detected",
|
|
109
|
+
detectedAt: /* @__PURE__ */ new Date(),
|
|
110
|
+
threshold: this.throughputDropThreshold,
|
|
111
|
+
observedValue: drop,
|
|
112
|
+
evidence
|
|
113
|
+
});
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
}
|
|
117
|
+
return anomalies;
|
|
118
|
+
}
|
|
119
|
+
toIntentPatterns(anomalies, stats) {
|
|
120
|
+
const statsByOp = new Map(stats.map((item) => [this.operationKey(item.operation), item]));
|
|
121
|
+
return anomalies.map((anomaly) => {
|
|
122
|
+
const stat = statsByOp.get(this.operationKey(anomaly.operation));
|
|
123
|
+
const confidence = {
|
|
124
|
+
score: Math.min(1, (anomaly.observedValue ?? 0) / (anomaly.threshold ?? 1)),
|
|
125
|
+
sampleSize: stat?.totalCalls ?? 0,
|
|
126
|
+
pValue: void 0
|
|
127
|
+
};
|
|
128
|
+
return {
|
|
129
|
+
id: randomUUID(),
|
|
130
|
+
type: this.mapMetricToIntent(anomaly.metric),
|
|
131
|
+
description: anomaly.description,
|
|
132
|
+
operation: anomaly.operation,
|
|
133
|
+
confidence,
|
|
134
|
+
metadata: {
|
|
135
|
+
observedValue: anomaly.observedValue,
|
|
136
|
+
threshold: anomaly.threshold
|
|
137
|
+
},
|
|
138
|
+
evidence: anomaly.evidence
|
|
139
|
+
};
|
|
140
|
+
});
|
|
141
|
+
}
|
|
142
|
+
suggestOptimizations(stats, anomalies, lifecycleContext) {
|
|
143
|
+
const anomaliesByOp = new Map(this.groupByOperation(anomalies));
|
|
144
|
+
const hints = [];
|
|
145
|
+
for (const stat of stats) {
|
|
146
|
+
const opKey = this.operationKey(stat.operation);
|
|
147
|
+
const opAnomalies = anomaliesByOp.get(opKey) ?? [];
|
|
148
|
+
for (const anomaly of opAnomalies) if (anomaly.metric === "latency") hints.push(this.applyLifecycleContext({
|
|
149
|
+
operation: stat.operation,
|
|
150
|
+
category: "performance",
|
|
151
|
+
summary: "Latency regression detected",
|
|
152
|
+
justification: `P99 latency at ${stat.p99LatencyMs}ms`,
|
|
153
|
+
recommendedActions: ["Add batching or caching layer", "Replay golden tests to capture slow inputs"]
|
|
154
|
+
}, lifecycleContext?.stage));
|
|
155
|
+
else if (anomaly.metric === "error-rate") {
|
|
156
|
+
const topError = Object.entries(stat.topErrors).sort((a, b) => b[1] - a[1])[0]?.[0];
|
|
157
|
+
hints.push(this.applyLifecycleContext({
|
|
158
|
+
operation: stat.operation,
|
|
159
|
+
category: "error-handling",
|
|
160
|
+
summary: "Error spike detected",
|
|
161
|
+
justification: topError ? `Dominant error code ${topError}` : "Increase in failures",
|
|
162
|
+
recommendedActions: ["Generate regression spec from failing payloads", "Add policy guardrails before rollout"]
|
|
163
|
+
}, lifecycleContext?.stage));
|
|
164
|
+
} else if (anomaly.metric === "throughput") hints.push(this.applyLifecycleContext({
|
|
165
|
+
operation: stat.operation,
|
|
166
|
+
category: "performance",
|
|
167
|
+
summary: "Throughput drop detected",
|
|
168
|
+
justification: "Significant traffic reduction relative to baseline",
|
|
169
|
+
recommendedActions: ["Validate routing + feature flag bucketing", "Backfill spec variant to rehydrate demand"]
|
|
170
|
+
}, lifecycleContext?.stage));
|
|
171
|
+
}
|
|
172
|
+
return hints;
|
|
173
|
+
}
|
|
174
|
+
operationKey(op) {
|
|
175
|
+
const coordinate = "operation" in op ? op.operation : op;
|
|
176
|
+
return `${coordinate.name}.v${coordinate.version}${coordinate.tenantId ? `@${coordinate.tenantId}` : ""}`;
|
|
177
|
+
}
|
|
178
|
+
buildUsageStats(samples) {
|
|
179
|
+
const durations = samples.map((s) => s.durationMs).sort((a, b) => a - b);
|
|
180
|
+
const errors = samples.filter((s) => !s.success);
|
|
181
|
+
const totalCalls = samples.length;
|
|
182
|
+
const successRate = (totalCalls - errors.length) / totalCalls;
|
|
183
|
+
const errorRate = errors.length / totalCalls;
|
|
184
|
+
const averageLatencyMs = durations.reduce((sum, value) => sum + value, 0) / totalCalls;
|
|
185
|
+
const topErrors = errors.reduce((acc, sample) => {
|
|
186
|
+
if (!sample.errorCode) return acc;
|
|
187
|
+
acc[sample.errorCode] = (acc[sample.errorCode] ?? 0) + 1;
|
|
188
|
+
return acc;
|
|
189
|
+
}, {});
|
|
190
|
+
const timestamps = samples.map((s) => s.timestamp.getTime());
|
|
191
|
+
const windowStart = new Date(Math.min(...timestamps));
|
|
192
|
+
const windowEnd = new Date(Math.max(...timestamps));
|
|
193
|
+
return {
|
|
194
|
+
operation: samples[0].operation,
|
|
195
|
+
totalCalls,
|
|
196
|
+
successRate,
|
|
197
|
+
errorRate,
|
|
198
|
+
averageLatencyMs,
|
|
199
|
+
p95LatencyMs: percentile(durations, .95),
|
|
200
|
+
p99LatencyMs: percentile(durations, .99),
|
|
201
|
+
maxLatencyMs: Math.max(...durations),
|
|
202
|
+
lastSeenAt: windowEnd,
|
|
203
|
+
windowStart,
|
|
204
|
+
windowEnd,
|
|
205
|
+
topErrors
|
|
206
|
+
};
|
|
207
|
+
}
|
|
208
|
+
toSeverity(ratio) {
|
|
209
|
+
if (ratio >= 2) return "high";
|
|
210
|
+
if (ratio >= 1.3) return "medium";
|
|
211
|
+
return "low";
|
|
212
|
+
}
|
|
213
|
+
mapMetricToIntent(metric) {
|
|
214
|
+
switch (metric) {
|
|
215
|
+
case "error-rate": return "error-spike";
|
|
216
|
+
case "latency": return "latency-regression";
|
|
217
|
+
case "throughput": return "throughput-drop";
|
|
218
|
+
default: return "schema-mismatch";
|
|
219
|
+
}
|
|
220
|
+
}
|
|
221
|
+
groupByOperation(items) {
|
|
222
|
+
const map = /* @__PURE__ */ new Map();
|
|
223
|
+
for (const item of items) {
|
|
224
|
+
const key = this.operationKey(item.operation);
|
|
225
|
+
const arr = map.get(key) ?? [];
|
|
226
|
+
arr.push(item);
|
|
227
|
+
map.set(key, arr);
|
|
228
|
+
}
|
|
229
|
+
return map;
|
|
230
|
+
}
|
|
231
|
+
applyLifecycleContext(hint, stage) {
|
|
232
|
+
if (stage === void 0) return hint;
|
|
233
|
+
const advice = LIFECYCLE_HINTS[mapStageBand(stage)]?.[hint.category];
|
|
234
|
+
if (!advice) return {
|
|
235
|
+
...hint,
|
|
236
|
+
lifecycleStage: stage
|
|
237
|
+
};
|
|
238
|
+
return {
|
|
239
|
+
...hint,
|
|
240
|
+
lifecycleStage: stage,
|
|
241
|
+
lifecycleNotes: advice.message,
|
|
242
|
+
recommendedActions: dedupeActions([...hint.recommendedActions, ...advice.supplementalActions])
|
|
243
|
+
};
|
|
244
|
+
}
|
|
245
|
+
};
|
|
246
|
+
function percentile(values, p) {
|
|
247
|
+
if (!values.length) return 0;
|
|
248
|
+
if (values.length === 1) return values[0];
|
|
249
|
+
return values[Math.min(values.length - 1, Math.floor(p * values.length))];
|
|
250
|
+
}
|
|
251
|
+
const mapStageBand = (stage) => {
|
|
252
|
+
if (stage <= 2) return "early";
|
|
253
|
+
if (stage === LifecycleStage.ProductMarketFit) return "pmf";
|
|
254
|
+
if (stage === LifecycleStage.GrowthScaleUp || stage === LifecycleStage.ExpansionPlatform) return "scale";
|
|
255
|
+
return "mature";
|
|
256
|
+
};
|
|
257
|
+
const LIFECYCLE_HINTS = {
|
|
258
|
+
early: {
|
|
259
|
+
performance: {
|
|
260
|
+
message: "Favor guardrails that protect learning velocity before heavy rewrites.",
|
|
261
|
+
supplementalActions: ["Wrap risky changes behind progressive delivery flags"]
|
|
262
|
+
},
|
|
263
|
+
"error-handling": {
|
|
264
|
+
message: "Make failures loud and recoverable so you can learn faster.",
|
|
265
|
+
supplementalActions: ["Add auto-rollbacks or manual kill switches"]
|
|
266
|
+
}
|
|
267
|
+
},
|
|
268
|
+
pmf: { performance: {
|
|
269
|
+
message: "Stabilize the core use case to avoid regressions while demand grows.",
|
|
270
|
+
supplementalActions: ["Instrument regression tests on critical specs"]
|
|
271
|
+
} },
|
|
272
|
+
scale: {
|
|
273
|
+
performance: {
|
|
274
|
+
message: "Prioritize resilience and multi-tenant safety as volumes expand.",
|
|
275
|
+
supplementalActions: ["Introduce workload partitioning or isolation per tenant"]
|
|
276
|
+
},
|
|
277
|
+
"error-handling": {
|
|
278
|
+
message: "Contain blast radius with policy fallbacks and circuit breakers.",
|
|
279
|
+
supplementalActions: ["Add circuit breakers to high-risk operations"]
|
|
280
|
+
}
|
|
281
|
+
},
|
|
282
|
+
mature: {
|
|
283
|
+
performance: {
|
|
284
|
+
message: "Optimize for margins and predictable SLAs.",
|
|
285
|
+
supplementalActions: ["Capture unit-cost impacts alongside latency fixes"]
|
|
286
|
+
},
|
|
287
|
+
"error-handling": {
|
|
288
|
+
message: "Prevent regressions with automated regression specs before deploy.",
|
|
289
|
+
supplementalActions: ["Run auto-evolution simulations on renewal scenarios"]
|
|
290
|
+
}
|
|
291
|
+
}
|
|
292
|
+
};
|
|
293
|
+
const dedupeActions = (actions) => {
|
|
294
|
+
const seen = /* @__PURE__ */ new Set();
|
|
295
|
+
const ordered = [];
|
|
296
|
+
for (const action of actions) {
|
|
297
|
+
if (seen.has(action)) continue;
|
|
298
|
+
seen.add(action);
|
|
299
|
+
ordered.push(action);
|
|
300
|
+
}
|
|
301
|
+
return ordered;
|
|
302
|
+
};
|
|
303
|
+
|
|
304
|
+
//#endregion
|
|
305
|
+
export { SpecAnalyzer };
|
|
@@ -1 +1,125 @@
|
|
|
1
|
-
import
|
|
1
|
+
import "../ai-agent/dist/approval/index.js";
|
|
2
|
+
import { mkdir, writeFile } from "node:fs/promises";
|
|
3
|
+
import { join } from "node:path";
|
|
4
|
+
|
|
5
|
+
//#region src/approval/integration.ts
|
|
6
|
+
var SpecSuggestionOrchestrator = class {
|
|
7
|
+
constructor(options) {
|
|
8
|
+
this.options = options;
|
|
9
|
+
}
|
|
10
|
+
async submit(suggestion, session, approvalReason) {
|
|
11
|
+
await this.options.repository.create(suggestion);
|
|
12
|
+
if (session && this.options.approval) await this.options.approval.requestApproval({
|
|
13
|
+
sessionId: session.sessionId,
|
|
14
|
+
agentId: session.agentId,
|
|
15
|
+
tenantId: session.tenantId,
|
|
16
|
+
toolName: "evolution.apply_suggestion",
|
|
17
|
+
toolCallId: suggestion.id,
|
|
18
|
+
toolArgs: { suggestionId: suggestion.id },
|
|
19
|
+
reason: approvalReason ?? suggestion.proposal.summary,
|
|
20
|
+
payload: { suggestionId: suggestion.id }
|
|
21
|
+
});
|
|
22
|
+
return suggestion;
|
|
23
|
+
}
|
|
24
|
+
async approve(id, reviewer, notes) {
|
|
25
|
+
const suggestion = await this.ensureSuggestion(id);
|
|
26
|
+
await this.options.repository.updateStatus(id, "approved", {
|
|
27
|
+
reviewer,
|
|
28
|
+
notes,
|
|
29
|
+
decidedAt: /* @__PURE__ */ new Date()
|
|
30
|
+
});
|
|
31
|
+
if (this.options.writer) await this.options.writer.write({
|
|
32
|
+
...suggestion,
|
|
33
|
+
status: "approved",
|
|
34
|
+
approvals: {
|
|
35
|
+
reviewer,
|
|
36
|
+
notes,
|
|
37
|
+
decidedAt: /* @__PURE__ */ new Date(),
|
|
38
|
+
status: "approved"
|
|
39
|
+
}
|
|
40
|
+
});
|
|
41
|
+
}
|
|
42
|
+
async reject(id, reviewer, notes) {
|
|
43
|
+
await this.options.repository.updateStatus(id, "rejected", {
|
|
44
|
+
reviewer,
|
|
45
|
+
notes,
|
|
46
|
+
decidedAt: /* @__PURE__ */ new Date()
|
|
47
|
+
});
|
|
48
|
+
}
|
|
49
|
+
list(filters) {
|
|
50
|
+
return this.options.repository.list(filters);
|
|
51
|
+
}
|
|
52
|
+
async ensureSuggestion(id) {
|
|
53
|
+
const suggestion = await this.options.repository.getById(id);
|
|
54
|
+
if (!suggestion) throw new Error(`Spec suggestion ${id} not found`);
|
|
55
|
+
return suggestion;
|
|
56
|
+
}
|
|
57
|
+
};
|
|
58
|
+
var FileSystemSuggestionWriter = class {
|
|
59
|
+
outputDir;
|
|
60
|
+
filenameTemplate;
|
|
61
|
+
constructor(options = {}) {
|
|
62
|
+
this.outputDir = options.outputDir ?? join(process.cwd(), "packages/libs/contracts/src/generated");
|
|
63
|
+
this.filenameTemplate = options.filenameTemplate ?? ((suggestion) => `${suggestion.target?.name ?? suggestion.intent.id}.v${suggestion.target?.version ?? "next"}.suggestion.json`);
|
|
64
|
+
}
|
|
65
|
+
async write(suggestion) {
|
|
66
|
+
await mkdir(this.outputDir, { recursive: true });
|
|
67
|
+
const filename = this.filenameTemplate(suggestion);
|
|
68
|
+
const filepath = join(this.outputDir, filename);
|
|
69
|
+
const payload = serializeSuggestion(suggestion);
|
|
70
|
+
await writeFile(filepath, JSON.stringify(payload, null, 2));
|
|
71
|
+
return filepath;
|
|
72
|
+
}
|
|
73
|
+
};
|
|
74
|
+
var InMemorySpecSuggestionRepository = class {
|
|
75
|
+
items = /* @__PURE__ */ new Map();
|
|
76
|
+
async create(suggestion) {
|
|
77
|
+
this.items.set(suggestion.id, suggestion);
|
|
78
|
+
}
|
|
79
|
+
async getById(id) {
|
|
80
|
+
return this.items.get(id);
|
|
81
|
+
}
|
|
82
|
+
async updateStatus(id, status, metadata) {
|
|
83
|
+
const suggestion = await this.getById(id);
|
|
84
|
+
if (!suggestion) return;
|
|
85
|
+
this.items.set(id, {
|
|
86
|
+
...suggestion,
|
|
87
|
+
status,
|
|
88
|
+
approvals: {
|
|
89
|
+
reviewer: metadata?.reviewer,
|
|
90
|
+
notes: metadata?.notes,
|
|
91
|
+
decidedAt: metadata?.decidedAt,
|
|
92
|
+
status
|
|
93
|
+
}
|
|
94
|
+
});
|
|
95
|
+
}
|
|
96
|
+
async list(filters) {
|
|
97
|
+
const values = [...this.items.values()];
|
|
98
|
+
if (!filters) return values;
|
|
99
|
+
return values.filter((item) => {
|
|
100
|
+
if (filters.status && item.status !== filters.status) return false;
|
|
101
|
+
if (filters.operationName && item.target?.name !== filters.operationName) return false;
|
|
102
|
+
return true;
|
|
103
|
+
});
|
|
104
|
+
}
|
|
105
|
+
};
|
|
106
|
+
function serializeSuggestion(suggestion) {
|
|
107
|
+
const { proposal, ...rest } = suggestion;
|
|
108
|
+
const { spec, ...proposalRest } = proposal;
|
|
109
|
+
return {
|
|
110
|
+
...rest,
|
|
111
|
+
proposal: {
|
|
112
|
+
...proposalRest,
|
|
113
|
+
specMeta: spec?.meta
|
|
114
|
+
},
|
|
115
|
+
createdAt: suggestion.createdAt.toISOString(),
|
|
116
|
+
intent: {
|
|
117
|
+
...suggestion.intent,
|
|
118
|
+
confidence: { ...suggestion.intent.confidence },
|
|
119
|
+
evidence: suggestion.intent.evidence
|
|
120
|
+
}
|
|
121
|
+
};
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
//#endregion
|
|
125
|
+
export { FileSystemSuggestionWriter, InMemorySpecSuggestionRepository, SpecSuggestionOrchestrator };
|
|
@@ -1,19 +1,185 @@
|
|
|
1
|
-
import{randomUUID
|
|
1
|
+
import { randomUUID } from "node:crypto";
|
|
2
|
+
import { Output, generateText } from "ai";
|
|
3
|
+
import * as z from "zod";
|
|
4
|
+
|
|
5
|
+
//#region src/generator/ai-spec-generator.ts
|
|
6
|
+
/**
|
|
7
|
+
* Zod schema for AI-generated spec suggestions.
|
|
8
|
+
*/
|
|
9
|
+
const SpecSuggestionProposalSchema = z.object({
|
|
10
|
+
summary: z.string().describe("Brief summary of the proposed change"),
|
|
11
|
+
rationale: z.string().describe("Detailed explanation of why this change is needed"),
|
|
12
|
+
changeType: z.enum([
|
|
13
|
+
"new-spec",
|
|
14
|
+
"revision",
|
|
15
|
+
"policy-update",
|
|
16
|
+
"schema-update"
|
|
17
|
+
]).describe("Type of change being proposed"),
|
|
18
|
+
recommendedActions: z.array(z.string()).describe("List of specific actions to implement the change"),
|
|
19
|
+
estimatedImpact: z.enum([
|
|
20
|
+
"low",
|
|
21
|
+
"medium",
|
|
22
|
+
"high"
|
|
23
|
+
]).describe("Estimated impact of implementing this change"),
|
|
24
|
+
riskLevel: z.enum([
|
|
25
|
+
"low",
|
|
26
|
+
"medium",
|
|
27
|
+
"high"
|
|
28
|
+
]).describe("Risk level associated with this change"),
|
|
29
|
+
diff: z.string().optional().describe("Optional diff or code snippet showing the change")
|
|
30
|
+
});
|
|
31
|
+
/**
|
|
32
|
+
* AI-powered spec generator using AI SDK v6.
|
|
33
|
+
*
|
|
34
|
+
* Uses structured output (Output.object) to generate
|
|
35
|
+
* well-formed spec suggestions from intent patterns.
|
|
36
|
+
*/
|
|
37
|
+
var AISpecGenerator = class {
|
|
38
|
+
model;
|
|
39
|
+
config;
|
|
40
|
+
systemPrompt;
|
|
41
|
+
constructor(options) {
|
|
42
|
+
this.model = options.model;
|
|
43
|
+
this.config = options.evolutionConfig ?? {};
|
|
44
|
+
this.systemPrompt = options.systemPrompt ?? `You are a ContractSpec evolution expert. Your role is to analyze telemetry data, anomalies, and usage patterns to suggest improvements to API contracts and specifications.
|
|
2
45
|
|
|
3
46
|
When generating suggestions:
|
|
4
47
|
1. Be specific and actionable
|
|
5
48
|
2. Consider backwards compatibility
|
|
6
49
|
3. Prioritize stability and reliability
|
|
7
50
|
4. Explain the rationale clearly
|
|
8
|
-
5. Estimate impact and risk accurately
|
|
51
|
+
5. Estimate impact and risk accurately`;
|
|
52
|
+
}
|
|
53
|
+
/**
|
|
54
|
+
* Generate a spec suggestion from an intent pattern using AI.
|
|
55
|
+
*/
|
|
56
|
+
async generateFromIntent(intent, options = {}) {
|
|
57
|
+
const prompt = this.buildPrompt(intent, options);
|
|
58
|
+
const { output } = await generateText({
|
|
59
|
+
model: this.model,
|
|
60
|
+
system: this.systemPrompt,
|
|
61
|
+
prompt,
|
|
62
|
+
output: Output.object({ schema: SpecSuggestionProposalSchema })
|
|
63
|
+
});
|
|
64
|
+
return this.buildSuggestion(intent, output);
|
|
65
|
+
}
|
|
66
|
+
/**
|
|
67
|
+
* Generate multiple suggestions for a batch of intents.
|
|
68
|
+
*/
|
|
69
|
+
async generateBatch(intents, options = {}) {
|
|
70
|
+
const maxConcurrent = options.maxConcurrent ?? 3;
|
|
71
|
+
const results = [];
|
|
72
|
+
for (let i = 0; i < intents.length; i += maxConcurrent) {
|
|
73
|
+
const batch = intents.slice(i, i + maxConcurrent);
|
|
74
|
+
const batchResults = await Promise.all(batch.map((intent) => this.generateFromIntent(intent)));
|
|
75
|
+
results.push(...batchResults);
|
|
76
|
+
}
|
|
77
|
+
return results;
|
|
78
|
+
}
|
|
79
|
+
/**
|
|
80
|
+
* Validate and enhance an existing suggestion using AI.
|
|
81
|
+
*/
|
|
82
|
+
async enhanceSuggestion(suggestion) {
|
|
83
|
+
const prompt = `Review and enhance this spec suggestion:
|
|
9
84
|
|
|
10
|
-
Intent: ${
|
|
11
|
-
Current Summary: ${
|
|
12
|
-
Current Rationale: ${
|
|
85
|
+
Intent: ${suggestion.intent.type} - ${suggestion.intent.description}
|
|
86
|
+
Current Summary: ${suggestion.proposal.summary}
|
|
87
|
+
Current Rationale: ${suggestion.proposal.rationale}
|
|
13
88
|
|
|
14
89
|
Evidence:
|
|
15
|
-
${
|
|
16
|
-
|
|
90
|
+
${suggestion.evidence.map((e) => `- ${e.type}: ${e.description}`).join("\n")}
|
|
91
|
+
|
|
92
|
+
Please provide an improved version with more specific recommendations.`;
|
|
93
|
+
const { output } = await generateText({
|
|
94
|
+
model: this.model,
|
|
95
|
+
system: this.systemPrompt,
|
|
96
|
+
prompt,
|
|
97
|
+
output: Output.object({ schema: SpecSuggestionProposalSchema })
|
|
98
|
+
});
|
|
99
|
+
return {
|
|
100
|
+
...suggestion,
|
|
101
|
+
proposal: {
|
|
102
|
+
...suggestion.proposal,
|
|
103
|
+
summary: output.summary,
|
|
104
|
+
rationale: output.rationale,
|
|
105
|
+
changeType: output.changeType,
|
|
106
|
+
diff: output.diff,
|
|
107
|
+
metadata: {
|
|
108
|
+
...suggestion.proposal.metadata,
|
|
109
|
+
aiEnhanced: true,
|
|
110
|
+
recommendedActions: output.recommendedActions,
|
|
111
|
+
estimatedImpact: output.estimatedImpact,
|
|
112
|
+
riskLevel: output.riskLevel
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
};
|
|
116
|
+
}
|
|
117
|
+
buildPrompt(intent, options) {
|
|
118
|
+
const parts = [
|
|
119
|
+
`Analyze this intent pattern and generate a spec suggestion:`,
|
|
120
|
+
``,
|
|
121
|
+
`Intent Type: ${intent.type}`,
|
|
122
|
+
`Description: ${intent.description}`,
|
|
123
|
+
`Confidence: ${(intent.confidence.score * 100).toFixed(0)}% (sample size: ${intent.confidence.sampleSize})`
|
|
124
|
+
];
|
|
125
|
+
if (intent.operation) parts.push(`Operation: ${intent.operation.name} v${intent.operation.version}`);
|
|
126
|
+
if (intent.evidence.length > 0) {
|
|
127
|
+
parts.push(``, `Evidence:`);
|
|
128
|
+
for (const evidence of intent.evidence) parts.push(`- [${evidence.type}] ${evidence.description}`);
|
|
129
|
+
}
|
|
130
|
+
if (intent.metadata) parts.push(``, `Metadata: ${JSON.stringify(intent.metadata, null, 2)}`);
|
|
131
|
+
if (options.existingSpec) parts.push(``, `Existing Spec:`, "```json", JSON.stringify(options.existingSpec, null, 2), "```");
|
|
132
|
+
if (options.additionalContext) parts.push(``, `Additional Context:`, options.additionalContext);
|
|
133
|
+
return parts.join("\n");
|
|
134
|
+
}
|
|
135
|
+
buildSuggestion(intent, aiOutput) {
|
|
136
|
+
const now = /* @__PURE__ */ new Date();
|
|
137
|
+
const proposal = {
|
|
138
|
+
summary: aiOutput.summary,
|
|
139
|
+
rationale: aiOutput.rationale,
|
|
140
|
+
changeType: aiOutput.changeType,
|
|
141
|
+
diff: aiOutput.diff,
|
|
142
|
+
metadata: {
|
|
143
|
+
aiGenerated: true,
|
|
144
|
+
recommendedActions: aiOutput.recommendedActions,
|
|
145
|
+
estimatedImpact: aiOutput.estimatedImpact,
|
|
146
|
+
riskLevel: aiOutput.riskLevel
|
|
147
|
+
}
|
|
148
|
+
};
|
|
149
|
+
return {
|
|
150
|
+
id: randomUUID(),
|
|
151
|
+
intent,
|
|
152
|
+
target: intent.operation,
|
|
153
|
+
proposal,
|
|
154
|
+
confidence: intent.confidence.score,
|
|
155
|
+
priority: this.calculatePriority(intent, aiOutput),
|
|
156
|
+
createdAt: now,
|
|
157
|
+
createdBy: "ai-spec-generator",
|
|
158
|
+
status: this.determineInitialStatus(intent),
|
|
159
|
+
evidence: intent.evidence,
|
|
160
|
+
tags: ["ai-generated", intent.type]
|
|
161
|
+
};
|
|
162
|
+
}
|
|
163
|
+
calculatePriority(intent, aiOutput) {
|
|
164
|
+
const impactScore = aiOutput.estimatedImpact === "high" ? 1 : aiOutput.estimatedImpact === "medium" ? .5 : .25;
|
|
165
|
+
const intentScore = intent.confidence.score;
|
|
166
|
+
const urgency = intent.type === "error-spike" ? .3 : intent.type === "latency-regression" ? .2 : 0;
|
|
167
|
+
const combined = impactScore * .4 + intentScore * .4 + urgency;
|
|
168
|
+
if (combined >= .7) return "high";
|
|
169
|
+
if (combined >= .4) return "medium";
|
|
170
|
+
return "low";
|
|
171
|
+
}
|
|
172
|
+
determineInitialStatus(intent) {
|
|
173
|
+
if (this.config.autoApproveThreshold && intent.confidence.score >= this.config.autoApproveThreshold && !this.config.requireApproval) return "approved";
|
|
174
|
+
return "pending";
|
|
175
|
+
}
|
|
176
|
+
};
|
|
177
|
+
/**
|
|
178
|
+
* Create an AI-powered spec generator.
|
|
179
|
+
*/
|
|
180
|
+
function createAISpecGenerator(config) {
|
|
181
|
+
return new AISpecGenerator(config);
|
|
182
|
+
}
|
|
17
183
|
|
|
18
|
-
|
|
19
|
-
|
|
184
|
+
//#endregion
|
|
185
|
+
export { AISpecGenerator, createAISpecGenerator };
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import { EvolutionConfig, IntentPattern, OperationCoordinate, SpecSuggestion, SpecSuggestionProposal, SuggestionStatus } from "../types.js";
|
|
2
|
-
import { Logger } from "@lssm/lib.observability";
|
|
3
2
|
import { ContractSpec, ResourceRefDescriptor } from "@lssm/lib.contracts";
|
|
4
3
|
import { AnySchemaModel } from "@lssm/lib.schema";
|
|
4
|
+
import { Logger } from "@lssm/lib.observability";
|
|
5
5
|
|
|
6
6
|
//#region src/generator/spec-generator.d.ts
|
|
7
7
|
type AnyContract = ContractSpec<AnySchemaModel, AnySchemaModel | ResourceRefDescriptor<boolean>>;
|
|
@@ -1 +1,120 @@
|
|
|
1
|
-
import
|
|
1
|
+
import "../observability/dist/index.js";
|
|
2
|
+
import { randomUUID } from "node:crypto";
|
|
3
|
+
|
|
4
|
+
//#region src/generator/spec-generator.ts
|
|
5
|
+
/**
 * Generates evolution "spec suggestions" from observed intents.
 *
 * Collaborators are injected through the constructor so the generator stays
 * deterministic in tests (clock) and decoupled from storage (getSpec).
 */
var SpecGenerator = class {
  config;
  logger;
  clock;
  getSpec;
  /**
   * @param {object} [options]
   * @param {object} [options.config] validation thresholds (minConfidence, requireApproval)
   * @param {object} [options.logger] optional logger exposing warn(message, meta)
   * @param {() => Date} [options.clock] time source; defaults to `new Date()`
   * @param {Function} [options.getSpec] lookup of a stored spec by (name, version)
   */
  constructor(options = {}) {
    const { config, logger, clock, getSpec } = options;
    this.config = config ?? {};
    this.logger = logger;
    this.clock = clock ?? (() => new Date());
    this.getSpec = getSpec;
  }
  /**
   * Builds a suggestion record for a detected intent. Caller-provided
   * options override the derived summary, rationale and change type.
   */
  generateFromIntent(intent, options = {}) {
    const createdAt = this.clock();
    const targetName = intent.operation?.name ?? "operation";
    const summary = options.summary ?? `${this.intentToVerb(intent.type)} ${targetName}`;
    const rationaleParts = [intent.description];
    if (intent.metadata?.observedValue) {
      rationaleParts.push(`Observed ${intent.metadata.observedValue}`);
    }
    const rationale = options.rationale ?? rationaleParts.filter(Boolean).join(" — ");
    return {
      id: randomUUID(),
      intent,
      target: intent.operation,
      proposal: {
        summary,
        rationale,
        changeType: options.changeType ?? this.inferChangeType(intent),
        kind: options.kind,
        spec: options.spec,
        diff: options.diff,
        metadata: options.metadata
      },
      confidence: intent.confidence.score,
      priority: this.intentToPriority(intent),
      createdAt,
      createdBy: options.createdBy ?? "auto-evolution",
      status: options.status ?? "pending",
      evidence: intent.evidence,
      tags: options.tags
    };
  }
  /**
   * Produces a suggestion whose spec is the stored base spec with `patch`
   * shallow-merged on top. Requires the injected getSpec lookup.
   */
  generateVariant(operation, patch, intent, options = {}) {
    if (!this.getSpec) {
      throw new Error("SpecGenerator requires getSpec() to generate variants");
    }
    const baseSpec = this.getSpec(operation.name, operation.version);
    if (!baseSpec) {
      throw new Error(`Cannot generate variant; spec ${operation.name}.v${operation.version} not found`);
    }
    return this.generateFromIntent(intent, {
      ...options,
      spec: mergeContract(baseSpec, patch)
    });
  }
  /**
   * Checks a suggestion against `config` (defaults to the instance config)
   * and returns `{ ok, reasons }`; failures are logged via the warn channel.
   */
  validateSuggestion(suggestion, config = this.config) {
    const reasons = [];
    if (config.minConfidence != null && suggestion.confidence < config.minConfidence) {
      reasons.push(`Confidence ${suggestion.confidence.toFixed(2)} below minimum ${config.minConfidence}`);
    }
    if (config.requireApproval && suggestion.status === "approved") {
      reasons.push("Suggestion cannot be auto-approved when approval is required");
    }
    if (suggestion.proposal.spec && !suggestion.proposal.spec.meta?.name) {
      reasons.push("Proposal spec must include meta.name");
    }
    if (!suggestion.proposal.summary) {
      reasons.push("Proposal summary is required");
    }
    const ok = reasons.length === 0;
    if (!ok) {
      this.logger?.warn("SpecGenerator.validateSuggestion.failed", {
        suggestionId: suggestion.id,
        reasons
      });
    }
    return { ok, reasons };
  }
  /** Maps an intent type to the verb used in derived summaries. */
  intentToVerb(intent) {
    const verbByIntent = {
      "error-spike": "Stabilize",
      "latency-regression": "Optimize",
      "missing-operation": "Introduce",
      "throughput-drop": "Rebalance"
    };
    return Object.hasOwn(verbByIntent, intent) ? verbByIntent[intent] : "Adjust";
  }
  /** Error spikes and high-confidence intents are prioritized first. */
  intentToPriority(intent) {
    const severity = intent.confidence.score;
    if (intent.type === "error-spike" || severity >= 0.8) return "high";
    return severity >= 0.5 ? "medium" : "low";
  }
  /** Derives the proposal change type from the intent type. */
  inferChangeType(intent) {
    const changeTypeByIntent = {
      "missing-operation": "new-spec",
      "schema-mismatch": "schema-update",
      "error-spike": "policy-update"
    };
    return Object.hasOwn(changeTypeByIntent, intent.type) ? changeTypeByIntent[intent.type] : "revision";
  }
};
|
|
92
|
+
/**
 * Shallow-merges a contract patch over a base contract.
 *
 * Top-level keys from `patch` win, except the well-known sections
 * (meta, io, policy, telemetry, sideEffects), which are themselves
 * shallow-merged key-by-key so a patch can override individual fields
 * without replacing a whole section.
 */
function mergeContract(base, patch) {
  const SECTIONS = ["meta", "io", "policy", "telemetry", "sideEffects"];
  const merged = { ...base, ...patch };
  for (const section of SECTIONS) {
    merged[section] = { ...base[section], ...patch[section] };
  }
  return merged;
}
|
|
118
|
+
|
|
119
|
+
//#endregion
|
|
120
|
+
export { SpecGenerator };
|
package/dist/index.js
CHANGED
|
@@ -1 +1,6 @@
|
|
|
1
|
-
import{SpecAnalyzer
|
|
1
|
+
import { SpecAnalyzer } from "./analyzer/spec-analyzer.js";
|
|
2
|
+
import { SpecGenerator } from "./generator/spec-generator.js";
|
|
3
|
+
import { AISpecGenerator, createAISpecGenerator } from "./generator/ai-spec-generator.js";
|
|
4
|
+
import { FileSystemSuggestionWriter, InMemorySpecSuggestionRepository, SpecSuggestionOrchestrator } from "./approval/integration.js";
|
|
5
|
+
|
|
6
|
+
export { AISpecGenerator, FileSystemSuggestionWriter, InMemorySpecSuggestionRepository, SpecAnalyzer, SpecGenerator, SpecSuggestionOrchestrator, createAISpecGenerator };
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import "./stages.js";
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import "./stages.js";
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
//#region ../lifecycle/dist/types/stages.js
|
|
2
|
+
/**
 * Two-way numeric enum of lifecycle stages (TypeScript-style):
 * LifecycleStage.Exploration === 0 and LifecycleStage[0] === "Exploration".
 */
let LifecycleStage = /* @__PURE__ */ (() => {
  const stageNames = [
    "Exploration",
    "ProblemSolutionFit",
    "MvpEarlyTraction",
    "ProductMarketFit",
    "GrowthScaleUp",
    "ExpansionPlatform",
    "MaturityRenewal"
  ];
  const twoWay = {};
  stageNames.forEach((stageName, ordinal) => {
    // Forward mapping (name -> ordinal) assigned first, then reverse.
    twoWay[twoWay[stageName] = ordinal] = stageName;
  });
  return twoWay;
})();
|
|
12
|
+
// Canonical progression order of lifecycle stages, from idea to maturity.
const LIFECYCLE_STAGE_ORDER = [
  "Exploration",
  "ProblemSolutionFit",
  "MvpEarlyTraction",
  "ProductMarketFit",
  "GrowthScaleUp",
  "ExpansionPlatform",
  "MaturityRenewal"
].map((stageName) => LifecycleStage[stageName]);
|
|
21
|
+
// Per-stage metadata keyed by LifecycleStage ordinal. Each entry carries:
//   slug/name  - machine and human identifiers for the stage,
//   question   - the core question the team should be answering,
//   signals    - evidence that the stage is being satisfied,
//   traps      - failure modes listed for this stage,
//   focusAreas - suggested areas of concentration.
const LIFECYCLE_STAGE_META = {
  [LifecycleStage.Exploration]: {
    id: LifecycleStage.Exploration,
    order: 0,
    slug: "exploration",
    name: "Exploration / Ideation",
    question: "Is there a problem worth my time?",
    signals: [
      "20+ discovery interviews",
      "Clear problem statement",
      "Named ICP"
    ],
    traps: ["Branding before discovery", "Premature tooling decisions"],
    focusAreas: [
      "Customer discovery",
      "Problem definition",
      "Segment clarity"
    ]
  },
  [LifecycleStage.ProblemSolutionFit]: {
    id: LifecycleStage.ProblemSolutionFit,
    order: 1,
    slug: "problem-solution-fit",
    name: "Problem–Solution Fit",
    question: "Do people care enough about this solution?",
    signals: [
      "Prototype reuse",
      "Referral energy",
      "Pre-pay interest"
    ],
    traps: ["“Market is huge” without users", "Skipping qualitative loops"],
    focusAreas: [
      "Solution hypothesis",
      "Value messaging",
      "Feedback capture"
    ]
  },
  [LifecycleStage.MvpEarlyTraction]: {
    id: LifecycleStage.MvpEarlyTraction,
    order: 2,
    slug: "mvp-early-traction",
    name: "MVP & Early Traction",
    question: "Can we get real usage and learn fast?",
    signals: [
      "20–50 named active users",
      "Weekly releases",
      "Noisy feedback"
    ],
    traps: ["Overbuilt infra for 10 users", "Undefined retention metric"],
    focusAreas: [
      "Activation",
      "Cohort tracking",
      "Feedback rituals"
    ]
  },
  [LifecycleStage.ProductMarketFit]: {
    id: LifecycleStage.ProductMarketFit,
    order: 3,
    slug: "product-market-fit",
    name: "Product–Market Fit",
    question: "Is this pulling us forward?",
    signals: [
      "Retention without heroics",
      "Organic word-of-mouth",
      "Value stories"
    ],
    traps: ["Hero growth that does not scale", "Ignoring churn signals"],
    focusAreas: [
      "Retention",
      "Reliability",
      "ICP clarity"
    ]
  },
  [LifecycleStage.GrowthScaleUp]: {
    id: LifecycleStage.GrowthScaleUp,
    order: 4,
    slug: "growth-scale-up",
    name: "Growth / Scale-up",
    question: "Can we grow this repeatably?",
    signals: [
      "Predictable channels",
      "Specialized hires",
      "Unit economics on track"
    ],
    traps: ["Paid spend masking retention gaps", "Infra debt blocking launches"],
    focusAreas: [
      "Ops systems",
      "Growth loops",
      "Reliability engineering"
    ]
  },
  [LifecycleStage.ExpansionPlatform]: {
    id: LifecycleStage.ExpansionPlatform,
    order: 5,
    slug: "expansion-platform",
    name: "Expansion / Platform",
    question: "What is the next growth curve?",
    signals: [
      "Stable core metrics",
      "Partner/API demand",
      "Ecosystem pull"
    ],
    traps: ["Platform theater before wedge is solid"],
    focusAreas: [
      "Partnerships",
      "APIs",
      "New market validation"
    ]
  },
  [LifecycleStage.MaturityRenewal]: {
    id: LifecycleStage.MaturityRenewal,
    order: 6,
    slug: "maturity-renewal",
    name: "Maturity / Renewal",
    question: "Optimize, reinvent, or sunset?",
    signals: [
      "Margin focus",
      "Portfolio bets",
      "Narrative refresh"
    ],
    traps: ["Assuming past success is enough"],
    focusAreas: [
      "Cost optimization",
      "Reinvention bets",
      "Sunset planning"
    ]
  }
};
|
|
149
|
+
|
|
150
|
+
//#endregion
|
|
151
|
+
export { LifecycleStage };
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import "../types/stages.js";
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import "./tracing/index.js";
|
|
2
|
+
import { createCounter, createHistogram, getMeter } from "./metrics/index.js";
|
|
3
|
+
import { Logger } from "./logging/index.js";
|
|
4
|
+
import "./tracing/middleware.js";
|
|
5
|
+
import "./intent/aggregator.js";
|
|
6
|
+
import "./intent/detector.js";
|
|
7
|
+
import "./pipeline/evolution-pipeline.js";
|
|
8
|
+
import "./pipeline/lifecycle-pipeline.js";
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import { randomUUID } from "node:crypto";
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import "./stages.js";
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import "./stages.js";
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
//#region ../observability/dist/lifecycle/dist/types/stages.mjs
|
|
2
|
+
/**
 * Two-way numeric enum of lifecycle stages (TypeScript-style):
 * LifecycleStage.Exploration === 0 and LifecycleStage[0] === "Exploration".
 */
let LifecycleStage = /* @__PURE__ */ function(LifecycleStage$1) {
  LifecycleStage$1[LifecycleStage$1["Exploration"] = 0] = "Exploration";
  LifecycleStage$1[LifecycleStage$1["ProblemSolutionFit"] = 1] = "ProblemSolutionFit";
  LifecycleStage$1[LifecycleStage$1["MvpEarlyTraction"] = 2] = "MvpEarlyTraction";
  LifecycleStage$1[LifecycleStage$1["ProductMarketFit"] = 3] = "ProductMarketFit";
  LifecycleStage$1[LifecycleStage$1["GrowthScaleUp"] = 4] = "GrowthScaleUp";
  LifecycleStage$1[LifecycleStage$1["ExpansionPlatform"] = 5] = "ExpansionPlatform";
  LifecycleStage$1[LifecycleStage$1["MaturityRenewal"] = 6] = "MaturityRenewal";
  return LifecycleStage$1;
}({});
// NOTE(review): a bundler-residue comma expression that only read enum
// properties (no side effects, no assignments) followed the enum here;
// it was dead code and has been removed.
|
|
13
|
+
// Per-stage metadata keyed by LifecycleStage ordinal (bundled copy from the
// observability build). Each entry carries:
//   slug/name  - machine and human identifiers for the stage,
//   question   - the core question the team should be answering,
//   signals    - evidence that the stage is being satisfied,
//   traps      - failure modes listed for this stage,
//   focusAreas - suggested areas of concentration.
const LIFECYCLE_STAGE_META = {
  [LifecycleStage.Exploration]: {
    id: LifecycleStage.Exploration,
    order: 0,
    slug: "exploration",
    name: "Exploration / Ideation",
    question: "Is there a problem worth my time?",
    signals: [
      "20+ discovery interviews",
      "Clear problem statement",
      "Named ICP"
    ],
    traps: ["Branding before discovery", "Premature tooling decisions"],
    focusAreas: [
      "Customer discovery",
      "Problem definition",
      "Segment clarity"
    ]
  },
  [LifecycleStage.ProblemSolutionFit]: {
    id: LifecycleStage.ProblemSolutionFit,
    order: 1,
    slug: "problem-solution-fit",
    name: "Problem–Solution Fit",
    question: "Do people care enough about this solution?",
    signals: [
      "Prototype reuse",
      "Referral energy",
      "Pre-pay interest"
    ],
    traps: ["“Market is huge” without users", "Skipping qualitative loops"],
    focusAreas: [
      "Solution hypothesis",
      "Value messaging",
      "Feedback capture"
    ]
  },
  [LifecycleStage.MvpEarlyTraction]: {
    id: LifecycleStage.MvpEarlyTraction,
    order: 2,
    slug: "mvp-early-traction",
    name: "MVP & Early Traction",
    question: "Can we get real usage and learn fast?",
    signals: [
      "20–50 named active users",
      "Weekly releases",
      "Noisy feedback"
    ],
    traps: ["Overbuilt infra for 10 users", "Undefined retention metric"],
    focusAreas: [
      "Activation",
      "Cohort tracking",
      "Feedback rituals"
    ]
  },
  [LifecycleStage.ProductMarketFit]: {
    id: LifecycleStage.ProductMarketFit,
    order: 3,
    slug: "product-market-fit",
    name: "Product–Market Fit",
    question: "Is this pulling us forward?",
    signals: [
      "Retention without heroics",
      "Organic word-of-mouth",
      "Value stories"
    ],
    traps: ["Hero growth that does not scale", "Ignoring churn signals"],
    focusAreas: [
      "Retention",
      "Reliability",
      "ICP clarity"
    ]
  },
  [LifecycleStage.GrowthScaleUp]: {
    id: LifecycleStage.GrowthScaleUp,
    order: 4,
    slug: "growth-scale-up",
    name: "Growth / Scale-up",
    question: "Can we grow this repeatably?",
    signals: [
      "Predictable channels",
      "Specialized hires",
      "Unit economics on track"
    ],
    traps: ["Paid spend masking retention gaps", "Infra debt blocking launches"],
    focusAreas: [
      "Ops systems",
      "Growth loops",
      "Reliability engineering"
    ]
  },
  [LifecycleStage.ExpansionPlatform]: {
    id: LifecycleStage.ExpansionPlatform,
    order: 5,
    slug: "expansion-platform",
    name: "Expansion / Platform",
    question: "What is the next growth curve?",
    signals: [
      "Stable core metrics",
      "Partner/API demand",
      "Ecosystem pull"
    ],
    traps: ["Platform theater before wedge is solid"],
    focusAreas: [
      "Partnerships",
      "APIs",
      "New market validation"
    ]
  },
  [LifecycleStage.MaturityRenewal]: {
    id: LifecycleStage.MaturityRenewal,
    order: 6,
    slug: "maturity-renewal",
    name: "Maturity / Renewal",
    question: "Optimize, reinvent, or sunset?",
    signals: [
      "Margin focus",
      "Portfolio bets",
      "Narrative refresh"
    ],
    traps: ["Assuming past success is enough"],
    focusAreas: [
      "Cost optimization",
      "Reinvention bets",
      "Sunset planning"
    ]
  }
};
|
|
141
|
+
|
|
142
|
+
//#endregion
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import "../types/stages.js";
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import { context, trace } from "@opentelemetry/api";
|
|
2
|
+
|
|
3
|
+
//#region ../observability/dist/logging/index.mjs
|
|
4
|
+
/**
 * Minimal structured JSON logger. Every entry is a single JSON line on
 * stdout, enriched with the active OpenTelemetry trace/span ids when a
 * span is present in the current context.
 */
var Logger = class {
  constructor(serviceName) {
    this.serviceName = serviceName;
  }
  /**
   * Emits one JSON line. `meta` keys are spread last into the entry and
   * may therefore override the standard fields.
   */
  log(level, message, meta = {}) {
    const activeSpan = trace.getSpan(context.active());
    const spanContext = activeSpan?.spanContext();
    const entry = {
      timestamp: new Date().toISOString(),
      service: this.serviceName,
      level,
      message,
      traceId: spanContext?.traceId,
      spanId: spanContext?.spanId,
      ...meta
    };
    console.log(JSON.stringify(entry));
  }
  debug(message, meta) {
    this.log("debug", message, meta);
  }
  info(message, meta) {
    this.log("info", message, meta);
  }
  warn(message, meta) {
    this.log("warn", message, meta);
  }
  error(message, meta) {
    this.log("error", message, meta);
  }
};
// Shared default logger named after the configured OTel service.
const logger = new Logger(process.env.OTEL_SERVICE_NAME || "unknown-service");
|
|
37
|
+
|
|
38
|
+
//#endregion
|
|
39
|
+
export { Logger };
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { metrics } from "@opentelemetry/api";
|
|
2
|
+
|
|
3
|
+
//#region ../observability/dist/metrics/index.mjs
|
|
4
|
+
// Meter name used when callers do not pass one explicitly.
const DEFAULT_METER_NAME = "@lssm/lib.observability";
/** Resolves a Meter from the global OpenTelemetry metrics API. */
function getMeter(meterName = DEFAULT_METER_NAME) {
  return metrics.getMeter(meterName);
}
/** Creates a named Counter instrument on the given (or default) meter. */
function createCounter(name, description, meterName) {
  const meter = getMeter(meterName);
  return meter.createCounter(name, { description });
}
/** Creates a named Histogram instrument on the given (or default) meter. */
function createHistogram(name, description, meterName) {
  const meter = getMeter(meterName);
  return meter.createHistogram(name, { description });
}
// Pre-built instruments on the default meter, shared across the library.
const standardMetrics = {
  httpRequests: createCounter("http_requests_total", "Total HTTP requests"),
  httpDuration: createHistogram("http_request_duration_seconds", "HTTP request duration"),
  operationErrors: createCounter("operation_errors_total", "Total operation errors"),
  workflowDuration: createHistogram("workflow_duration_seconds", "Workflow execution duration")
};
|
|
20
|
+
|
|
21
|
+
//#endregion
|
|
22
|
+
export { createCounter, createHistogram, getMeter };
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import { trace } from "@opentelemetry/api";
|
package/dist/types.d.ts
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
|
-
import { LifecycleStage } from "@lssm/lib.lifecycle";
|
|
2
1
|
import { ContractSpec, OpKind, ResourceRefDescriptor } from "@lssm/lib.contracts";
|
|
3
2
|
import { AnySchemaModel } from "@lssm/lib.schema";
|
|
3
|
+
import { LifecycleStage } from "@lssm/lib.lifecycle";
|
|
4
4
|
|
|
5
5
|
//#region src/types.d.ts
|
|
6
6
|
type AnomalySeverity = 'low' | 'medium' | 'high';
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@lssm/lib.evolution",
|
|
3
|
-
"version": "0.0.0-canary-
|
|
3
|
+
"version": "0.0.0-canary-20251217072406",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"main": "./dist/index.js",
|
|
6
6
|
"module": "./dist/index.js",
|
|
@@ -25,18 +25,18 @@
|
|
|
25
25
|
"dependencies": {
|
|
26
26
|
"ai": "beta",
|
|
27
27
|
"zod": "^4.1.13",
|
|
28
|
-
"@lssm/lib.ai-agent": "0.0.0-canary-
|
|
29
|
-
"@lssm/lib.contracts": "0.0.0-canary-
|
|
30
|
-
"@lssm/lib.lifecycle": "0.0.0-canary-
|
|
31
|
-
"@lssm/lib.observability": "0.0.0-canary-
|
|
32
|
-
"@lssm/lib.schema": "0.0.0-canary-
|
|
28
|
+
"@lssm/lib.ai-agent": "0.0.0-canary-20251217072406",
|
|
29
|
+
"@lssm/lib.contracts": "0.0.0-canary-20251217072406",
|
|
30
|
+
"@lssm/lib.lifecycle": "0.0.0-canary-20251217072406",
|
|
31
|
+
"@lssm/lib.observability": "0.0.0-canary-20251217072406",
|
|
32
|
+
"@lssm/lib.schema": "0.0.0-canary-20251217072406"
|
|
33
33
|
},
|
|
34
34
|
"peerDependencies": {
|
|
35
35
|
"@prisma/client": "7.1.0"
|
|
36
36
|
},
|
|
37
37
|
"devDependencies": {
|
|
38
|
-
"@lssm/tool.tsdown": "0.0.0-canary-
|
|
39
|
-
"@lssm/tool.typescript": "0.0.0-canary-
|
|
38
|
+
"@lssm/tool.tsdown": "0.0.0-canary-20251217072406",
|
|
39
|
+
"@lssm/tool.typescript": "0.0.0-canary-20251217072406",
|
|
40
40
|
"tsdown": "^0.17.4",
|
|
41
41
|
"typescript": "^5.9.3"
|
|
42
42
|
},
|