@diagrammo/dgmo 0.4.2 → 0.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59) hide show
  1. package/.claude/skills/dgmo-chart/SKILL.md +28 -0
  2. package/.claude/skills/dgmo-generate/SKILL.md +1 -0
  3. package/.claude/skills/dgmo-sequence/SKILL.md +24 -1
  4. package/.cursorrules +27 -2
  5. package/.github/copilot-instructions.md +36 -3
  6. package/.windsurfrules +27 -2
  7. package/README.md +12 -3
  8. package/dist/cli.cjs +197 -154
  9. package/dist/index.cjs +8371 -3200
  10. package/dist/index.cjs.map +1 -1
  11. package/dist/index.d.cts +502 -58
  12. package/dist/index.d.ts +502 -58
  13. package/dist/index.js +8594 -3444
  14. package/dist/index.js.map +1 -1
  15. package/docs/ai-integration.md +1 -1
  16. package/docs/language-reference.md +336 -17
  17. package/docs/migration-sequence-color-to-tags.md +98 -0
  18. package/package.json +1 -1
  19. package/src/c4/renderer.ts +1 -20
  20. package/src/class/renderer.ts +1 -11
  21. package/src/cli.ts +40 -0
  22. package/src/d3.ts +92 -2
  23. package/src/dgmo-router.ts +11 -0
  24. package/src/echarts.ts +74 -8
  25. package/src/er/parser.ts +29 -3
  26. package/src/er/renderer.ts +1 -15
  27. package/src/graph/flowchart-parser.ts +7 -30
  28. package/src/graph/flowchart-renderer.ts +62 -69
  29. package/src/graph/layout.ts +5 -0
  30. package/src/graph/state-parser.ts +388 -0
  31. package/src/graph/state-renderer.ts +496 -0
  32. package/src/graph/types.ts +4 -2
  33. package/src/index.ts +42 -1
  34. package/src/infra/compute.ts +1113 -0
  35. package/src/infra/layout.ts +575 -0
  36. package/src/infra/parser.ts +559 -0
  37. package/src/infra/renderer.ts +1509 -0
  38. package/src/infra/roles.ts +60 -0
  39. package/src/infra/serialize.ts +67 -0
  40. package/src/infra/types.ts +221 -0
  41. package/src/infra/validation.ts +192 -0
  42. package/src/initiative-status/layout.ts +56 -61
  43. package/src/initiative-status/renderer.ts +13 -13
  44. package/src/kanban/renderer.ts +1 -24
  45. package/src/org/layout.ts +28 -37
  46. package/src/org/parser.ts +16 -1
  47. package/src/org/renderer.ts +159 -121
  48. package/src/org/resolver.ts +90 -23
  49. package/src/palettes/color-utils.ts +30 -0
  50. package/src/render.ts +2 -0
  51. package/src/sequence/parser.ts +202 -42
  52. package/src/sequence/renderer.ts +576 -113
  53. package/src/sequence/tag-resolution.ts +163 -0
  54. package/src/sitemap/collapse.ts +187 -0
  55. package/src/sitemap/layout.ts +738 -0
  56. package/src/sitemap/parser.ts +489 -0
  57. package/src/sitemap/renderer.ts +774 -0
  58. package/src/sitemap/types.ts +42 -0
  59. package/src/utils/tag-groups.ts +119 -0
@@ -0,0 +1,1113 @@
1
+ // ============================================================
2
+ // Infra Chart Computation Engine
3
+ // ============================================================
4
+ //
5
+ // Pure function: computeInfra(parsed, params) → ComputedInfraModel
6
+ //
7
+ // Traverses the DAG from the edge entry point, computing downstream
8
+ // rps through behavior transformations (cache-hit, firewall-block,
9
+ // ratelimit-rps) and split distribution.
10
+
11
+ import type {
12
+ ParsedInfra,
13
+ InfraNode,
14
+ InfraEdge,
15
+ InfraComputeParams,
16
+ ComputedInfraModel,
17
+ ComputedInfraNode,
18
+ ComputedInfraEdge,
19
+ InfraDiagnostic,
20
+ InfraCbState,
21
+ InfraLatencyPercentiles,
22
+ InfraAvailabilityPercentiles,
23
+ InfraProperty,
24
+ } from './types';
25
+ import { INFRA_BEHAVIOR_KEYS } from './types';
26
+
27
+ // ============================================================
28
+ // Helpers
29
+ // ============================================================
30
+
31
+ /** Get a numeric property value from a node, or a default. */
32
+ function getNumProp(node: InfraNode, key: string, fallback: number): number {
33
+ const prop = node.properties.find((p) => p.key === key);
34
+ if (!prop) return fallback;
35
+ if (typeof prop.value === 'number') return prop.value;
36
+ // Try parsing string (e.g. "1-8" is not a simple number)
37
+ const num = parseFloat(String(prop.value));
38
+ return isNaN(num) ? fallback : num;
39
+ }
40
+
41
+ /** Parse instance range "N-M" → {min, max}, or fixed "N" → {min: N, max: N}. */
42
+ function getInstanceRange(node: InfraNode): { min: number; max: number } {
43
+ const prop = node.properties.find((p) => p.key === 'instances');
44
+ if (!prop) return { min: 1, max: 1 };
45
+ if (typeof prop.value === 'number') return { min: prop.value, max: prop.value };
46
+ const str = String(prop.value);
47
+ const dash = str.indexOf('-');
48
+ if (dash >= 0) {
49
+ const min = parseInt(str.substring(0, dash), 10);
50
+ const max = parseInt(str.substring(dash + 1), 10);
51
+ return { min: isNaN(min) ? 1 : min, max: isNaN(max) ? min : max };
52
+ }
53
+ const num = parseInt(str, 10);
54
+ return { min: isNaN(num) ? 1 : num, max: isNaN(num) ? 1 : num };
55
+ }
56
+
57
+ /** Check if a node is serverless (has concurrency property). */
58
+ function isServerless(node: InfraNode): boolean {
59
+ return getNumProp(node, 'concurrency', 0) > 0;
60
+ }
61
+
62
+ /** Check if a node is a queue (has buffer property). */
63
+ function isQueue(node: InfraNode): boolean {
64
+ return getNumProp(node, 'buffer', 0) > 0;
65
+ }
66
+
67
+ /** Compute serverless effective capacity: concurrency / (duration_ms / 1000). */
68
+ function serverlessCapacity(node: InfraNode): number {
69
+ const concurrency = getNumProp(node, 'concurrency', 0);
70
+ const durationMs = getNumProp(node, 'duration-ms', 100);
71
+ return concurrency / (durationMs / 1000);
72
+ }
73
+
74
+ /** Backward-compatible helper used by overload detection. */
75
+ function getInstances(node: InfraNode): number {
76
+ return getInstanceRange(node).min;
77
+ }
78
+
79
+ /** Compute dynamic instance count based on load and max-rps. */
80
+ function computeDynamicInstances(node: InfraNode, computedRps: number): number {
81
+ const { min, max } = getInstanceRange(node);
82
+ const maxRps = getNumProp(node, 'max-rps', 0);
83
+ if (maxRps <= 0 || min === max) return min;
84
+ const needed = Math.ceil(computedRps / maxRps);
85
+ return Math.min(Math.max(needed, min), max);
86
+ }
87
+
88
+ /** Determine circuit breaker state from thresholds and current load. */
89
+ function computeCbState(node: InfraNode, computedRps: number, computedLatencyMs: number, instanceOverride?: number, groupMultiplier = 1): InfraCbState {
90
+ const errorThreshold = getNumProp(node, 'cb-error-threshold', 0);
91
+ const latencyThreshold = getNumProp(node, 'cb-latency-threshold-ms', 0);
92
+ if (errorThreshold <= 0 && latencyThreshold <= 0) return 'closed';
93
+
94
+ // Error-rate based: overloaded nodes have error rate proportional to excess
95
+ let capacity: number;
96
+ if (isServerless(node)) {
97
+ capacity = serverlessCapacity(node);
98
+ } else {
99
+ const maxRps = getNumProp(node, 'max-rps', 0);
100
+ const instances = instanceOverride ?? computeDynamicInstances(node, computedRps);
101
+ capacity = maxRps > 0 ? maxRps * instances * groupMultiplier : 0;
102
+ }
103
+
104
+ if (errorThreshold > 0 && capacity > 0) {
105
+ const errorRate = computedRps > capacity ? ((computedRps - capacity) / computedRps) * 100 : 0;
106
+ if (errorRate >= errorThreshold) return 'open';
107
+ }
108
+
109
+ // Latency-based
110
+ if (latencyThreshold > 0 && computedLatencyMs > latencyThreshold) return 'open';
111
+
112
+ return 'closed';
113
+ }
114
+
115
/**
 * Compute local availability at a node (0-1).
 * Factors: uptime, overload shed (rps > capacity), rate-limit reject (rps > ratelimit).
 * Cache-hit and firewall-block are NOT availability reducers — they're
 * successful traffic reductions (cache serves, firewall correctly blocks).
 *
 * @param node             node under evaluation (edge nodes always return 1)
 * @param inboundRps       computed inbound request rate at this node
 * @param instanceOverride explicit instance count (bypasses autoscaling)
 * @param groupMultiplier  parent group's `instances` factor applied to capacity/drain-rate
 * @param defaultUptime    chart-level default uptime (%) when the node declares none
 */
function computeLocalAvailability(node: InfraNode, inboundRps: number, instanceOverride?: number, groupMultiplier = 1, defaultUptime = 100): number {
  if (node.isEdge) return 1;

  let avail = 1;

  // Uptime factor
  const uptime = getNumProp(node, 'uptime', defaultUptime) / 100;
  avail *= uptime;

  // Queue nodes: availability = 1.0 when buffer has headroom, degrades on overflow
  // drain-rate scales with group instances (more consumers), buffer does NOT.
  // NOTE: queues return early here, deliberately skipping the overload-shed and
  // rate-limit checks below — a queue absorbs excess load rather than shedding it.
  if (isQueue(node)) {
    const buffer = getNumProp(node, 'buffer', 0);
    const drainRate = getNumProp(node, 'drain-rate', 0) * groupMultiplier;
    if (drainRate > 0 && buffer > 0) {
      const fillRate = Math.max(0, inboundRps - drainRate);
      if (fillRate > 0) {
        const timeToOverflow = buffer / fillRate;
        // If overflow is imminent (< 60s sustained), availability degrades
        if (timeToOverflow < 60) {
          avail *= drainRate / inboundRps; // proportional to excess being dropped
        }
        // Otherwise buffer is absorbing — availability stays at 1.0
      }
    }
    return avail;
  }

  // Overload shed: if RPS exceeds total capacity, excess requests fail
  if (isServerless(node)) {
    const capacity = serverlessCapacity(node);
    if (capacity > 0 && inboundRps > capacity) {
      avail *= capacity / inboundRps;
    }
  } else {
    const maxRps = getNumProp(node, 'max-rps', 0);
    if (maxRps > 0 && inboundRps > 0) {
      const instances = instanceOverride ?? computeDynamicInstances(node, inboundRps);
      const capacity = maxRps * instances * groupMultiplier;
      if (inboundRps > capacity) {
        avail *= capacity / inboundRps;
      }
    }
  }

  // Rate-limit reject: requests above the rate limit are rejected.
  // Pre-ratelimit RPS = after cache-hit, firewall-block but before ratelimit
  // (mirrors the transformation order in applyBehaviors).
  const rateLimit = getNumProp(node, 'ratelimit-rps', 0);
  if (rateLimit > 0 && inboundRps > 0) {
    let preRateLimitRps = inboundRps;
    const cacheHit = getNumProp(node, 'cache-hit', 0);
    if (cacheHit > 0) preRateLimitRps *= (100 - cacheHit) / 100;
    const fwBlock = getNumProp(node, 'firewall-block', 0);
    if (fwBlock > 0) preRateLimitRps *= (100 - fwBlock) / 100;

    if (preRateLimitRps > rateLimit) {
      avail *= rateLimit / preRateLimitRps;
    }
  }

  return avail;
}
183
+
184
+ /**
185
+ * Compute the effective outbound rps after applying behavior transformations.
186
+ * Each behavior reduces the traffic that flows downstream:
187
+ * - cache-hit: N% → only (100 - N)% passes through
188
+ * - firewall-block: N% → only (100 - N)% passes through
189
+ * - ratelimit-rps: N → caps outbound at N rps
190
+ */
191
+ function applyBehaviors(node: InfraNode, inboundRps: number, groupMultiplier = 1): number {
192
+ let rps = inboundRps;
193
+
194
+ const cacheHit = getNumProp(node, 'cache-hit', 0);
195
+ if (cacheHit > 0) rps *= (100 - cacheHit) / 100;
196
+
197
+ const fwBlock = getNumProp(node, 'firewall-block', 0);
198
+ if (fwBlock > 0) rps *= (100 - fwBlock) / 100;
199
+
200
+ const rateLimit = getNumProp(node, 'ratelimit-rps', 0);
201
+ if (rateLimit > 0 && rps > rateLimit) rps = rateLimit;
202
+
203
+ // Queue: cap outbound at drain-rate (scaled by group instances)
204
+ if (isQueue(node)) {
205
+ const drainRate = getNumProp(node, 'drain-rate', 0) * groupMultiplier;
206
+ if (drainRate > 0 && rps > drainRate) rps = drainRate;
207
+ }
208
+
209
+ return rps;
210
+ }
211
+
212
+ // ============================================================
213
+ // Group Collapse — rewrite parsed model replacing collapsed groups with virtual nodes
214
+ // ============================================================
215
+
216
+ /**
217
+ * Collapse specified groups into virtual nodes.
218
+ * - Child nodes are removed
219
+ * - Internal edges are removed
220
+ * - A virtual node is created with aggregated properties
221
+ * - External edges are re-routed to/from the virtual node
222
+ */
223
/** Result of collapsing groups into virtual nodes. */
interface CollapseResult {
  /** Rewritten model: collapsed-group children replaced by one virtual node per group. */
  parsed: ParsedInfra;
  /** Per-group child capacities: each child's max-rps × child instances. */
  childCapacities: Map<string, number[]>;
  /** Per-group instance count. */
  groupInstances: Map<string, number>;
}
230
+
231
/**
 * Collapse each group in `collapsedIds` into a single virtual node.
 *
 * For every collapsed group: child nodes and group-internal edges are removed,
 * a virtual node with aggregated properties (summed latency, bottleneck
 * capacity, composed uptime, merged behavior props and tags) replaces them,
 * and external/cross-group edges are re-routed to/from the virtual node,
 * deduplicated by source→target key.
 *
 * @param collapsedIds     ids of the groups to collapse
 * @param defaultLatencyMs chart-level latency default for children without latency-ms
 * @param defaultUptime    chart-level uptime default (%) for children without uptime
 */
function collapseGroups(parsed: ParsedInfra, collapsedIds: Set<string>, defaultLatencyMs = 0, defaultUptime = 100): CollapseResult {
  if (collapsedIds.size === 0) return { parsed, childCapacities: new Map(), groupInstances: new Map() };

  // Build child sets per collapsed group
  const groupChildren = new Map<string, InfraNode[]>();
  for (const node of parsed.nodes) {
    if (node.groupId && collapsedIds.has(node.groupId)) {
      const list = groupChildren.get(node.groupId) ?? [];
      list.push(node);
      groupChildren.set(node.groupId, list);
    }
  }

  // All child node IDs across all collapsed groups
  const childIds = new Set<string>();
  for (const children of groupChildren.values()) {
    for (const c of children) childIds.add(c.id);
  }

  // Map child node → its collapsed group
  const nodeToGroup = new Map<string, string>();
  for (const node of parsed.nodes) {
    if (node.groupId && collapsedIds.has(node.groupId)) {
      nodeToGroup.set(node.id, node.groupId);
    }
  }

  // Classify edges
  // NOTE(review): internalEdges is populated but never read afterwards —
  // internal edges are effectively dropped; confirm this is intentional.
  const internalEdges = new Set<InfraEdge>();
  // External edges entering a collapsed group (target is a child)
  const inboundEdges: InfraEdge[] = [];
  // External edges leaving a collapsed group (source is a child)
  const outboundEdges: InfraEdge[] = [];
  // Cross-group edges (source in group A, target in group B)
  const crossGroupEdges: InfraEdge[] = [];
  const otherEdges: InfraEdge[] = [];

  for (const edge of parsed.edges) {
    const srcInside = childIds.has(edge.sourceId);
    const tgtInside = childIds.has(edge.targetId);
    if (srcInside && tgtInside) {
      const srcGroup = nodeToGroup.get(edge.sourceId);
      const tgtGroup = nodeToGroup.get(edge.targetId);
      if (srcGroup === tgtGroup) {
        internalEdges.add(edge);
      } else {
        // Different collapsed groups — re-route both ends
        crossGroupEdges.push(edge);
      }
    } else if (!srcInside && tgtInside) {
      inboundEdges.push(edge);
    } else if (srcInside && !tgtInside) {
      outboundEdges.push(edge);
    } else {
      otherEdges.push(edge);
    }
  }

  // Build virtual nodes for each collapsed group
  const virtualNodes: InfraNode[] = [];
  const childCapacities = new Map<string, number[]>();
  const groupInstancesMap = new Map<string, number>();
  for (const group of parsed.groups) {
    if (!collapsedIds.has(group.id)) continue;
    const children = groupChildren.get(group.id) ?? [];
    if (children.length === 0) continue;

    // Aggregate properties: sum latencies, bottleneck capacity, compose behaviors
    let totalLatency = 0;
    let minEffectiveCapacity = Infinity;
    let hasMaxRps = false;
    let composedUptime = 1;
    const behaviorProps: InfraProperty[] = [];
    const perChildCapacities: number[] = [];

    for (const child of children) {
      // Latency: use explicit value, or diagram default (matching BFS behavior)
      const latencyProp = child.properties.find((p) => p.key === 'latency-ms');
      const childIsServerless = child.properties.some((p) => p.key === 'concurrency');
      if (childIsServerless) {
        // Serverless nodes use duration-ms as latency contribution
        const durationProp = child.properties.find((p) => p.key === 'duration-ms');
        totalLatency += durationProp
          ? (typeof durationProp.value === 'number' ? durationProp.value : parseFloat(String(durationProp.value)) || 100)
          : 100;
      } else if (latencyProp) {
        totalLatency += (typeof latencyProp.value === 'number' ? latencyProp.value : parseFloat(String(latencyProp.value)) || 0);
      } else {
        totalLatency += defaultLatencyMs;
      }

      const maxRps = child.properties.find((p) => p.key === 'max-rps');
      if (maxRps) {
        hasMaxRps = true;
        const val = typeof maxRps.value === 'number' ? maxRps.value : parseFloat(String(maxRps.value)) || 0;
        // Effective capacity = max-rps × child instances
        // NOTE(review): only numeric `instances` values count here; a range
        // string like "1-8" falls back to 1 (unlike getInstanceRange) — confirm.
        const childInstProp = child.properties.find((p) => p.key === 'instances');
        const childInst = childInstProp && typeof childInstProp.value === 'number' ? childInstProp.value : 1;
        const effectiveCapacity = val * childInst;
        if (effectiveCapacity < minEffectiveCapacity) minEffectiveCapacity = effectiveCapacity;
        perChildCapacities.push(effectiveCapacity);
      }

      // Uptime: use explicit value, or diagram default (matching BFS behavior)
      const uptimeProp = child.properties.find((p) => p.key === 'uptime');
      const uptimeVal = uptimeProp
        ? (typeof uptimeProp.value === 'number' ? uptimeProp.value : parseFloat(String(uptimeProp.value)) || 100)
        : defaultUptime;
      composedUptime *= uptimeVal / 100;

      // Collect behavior keys (cache-hit, firewall-block, ratelimit-rps)
      // and queue/serverless properties
      for (const prop of child.properties) {
        if (['cache-hit', 'firewall-block', 'ratelimit-rps',
             'buffer', 'drain-rate', 'retention-hours', 'partitions',
             'concurrency', 'duration-ms', 'cold-start-ms'].includes(prop.key)) {
          behaviorProps.push(prop);
        }
      }
    }

    // Build virtual node properties
    const props: InfraProperty[] = [];
    if (totalLatency > 0) props.push({ key: 'latency-ms', value: totalLatency, lineNumber: group.lineNumber });

    // Apply group instances multiplier to bottleneck capacity
    // minEffectiveCapacity already includes child instances; multiply by group instances
    const groupInstances = typeof group.instances === 'number' ? group.instances : 1;
    if (hasMaxRps && minEffectiveCapacity < Infinity) {
      props.push({ key: 'max-rps', value: Math.round(minEffectiveCapacity * groupInstances), lineNumber: group.lineNumber });
    }
    if (composedUptime < 1) {
      // Round composed uptime to 2 decimal places (percent).
      props.push({ key: 'uptime', value: Math.round(composedUptime * 10000) / 100, lineNumber: group.lineNumber });
    }
    // Note: we do NOT push `instances` as a node property because max-rps is
    // already multiplied by groupInstances. Adding instances would double-count
    // in computeDynamicInstances which multiplies capacity = maxRps × instances.
    // drain-rate scales with group instances (more consumers), buffer does NOT.
    for (const bp of behaviorProps) {
      if (bp.key === 'drain-rate') {
        const val = typeof bp.value === 'number' ? bp.value : parseFloat(String(bp.value)) || 0;
        props.push({ ...bp, value: val * groupInstances, lineNumber: group.lineNumber });
      } else {
        props.push({ ...bp, lineNumber: group.lineNumber });
      }
    }

    // Merge tags from children (first non-empty wins per key)
    const tags: Record<string, string> = {};
    for (const child of children) {
      for (const [k, v] of Object.entries(child.tags)) {
        if (!(k in tags)) tags[k] = v;
      }
    }

    const gInst = typeof group.instances === 'number' ? group.instances : 1;
    groupInstancesMap.set(group.id, gInst);
    if (perChildCapacities.length > 0) {
      childCapacities.set(group.id, perChildCapacities);
    }

    virtualNodes.push({
      id: group.id,
      label: group.label,
      properties: props,
      groupId: null,
      tags,
      isEdge: false,
      lineNumber: group.lineNumber,
    });
  }

  // Re-route edges through virtual nodes (dedup by source->target key)
  const rewrittenEdges: InfraEdge[] = [...otherEdges];
  const seenEdgeKeys = new Set<string>();

  // Inbound: target was a child → re-target to group virtual node
  for (const edge of inboundEdges) {
    const groupId = nodeToGroup.get(edge.targetId)!;
    const key = `${edge.sourceId}->${groupId}`;
    if (seenEdgeKeys.has(key)) continue;
    seenEdgeKeys.add(key);
    rewrittenEdges.push({ ...edge, targetId: groupId });
  }

  // Outbound: source was a child → re-source from group virtual node
  for (const edge of outboundEdges) {
    const groupId = nodeToGroup.get(edge.sourceId)!;
    const key = `${groupId}->${edge.targetId}`;
    if (seenEdgeKeys.has(key)) continue;
    seenEdgeKeys.add(key);
    rewrittenEdges.push({ ...edge, sourceId: groupId });
  }

  // Cross-group: both source and target in different collapsed groups
  for (const edge of crossGroupEdges) {
    const srcGroupId = nodeToGroup.get(edge.sourceId)!;
    const tgtGroupId = nodeToGroup.get(edge.targetId)!;
    const key = `${srcGroupId}->${tgtGroupId}`;
    if (seenEdgeKeys.has(key)) continue;
    seenEdgeKeys.add(key);
    rewrittenEdges.push({ ...edge, sourceId: srcGroupId, targetId: tgtGroupId });
  }

  // Build new node list: non-child nodes + virtual nodes
  const newNodes = parsed.nodes.filter((n) => !childIds.has(n.id));
  newNodes.push(...virtualNodes);

  // Groups: keep non-collapsed groups
  const newGroups = parsed.groups.filter((g) => !collapsedIds.has(g.id));

  return {
    parsed: {
      ...parsed,
      nodes: newNodes,
      edges: rewrittenEdges,
      groups: newGroups,
    },
    childCapacities,
    groupInstances: groupInstancesMap,
  };
}
453
+
454
+ // ============================================================
455
+ // Split Resolution
456
+ // ============================================================
457
+
458
/** An outbound edge paired with its resolved traffic share. */
interface ResolvedSplit {
  /** The outbound edge this share applies to. */
  edge: ParsedInfra['edges'][number];
  split: number; // 0-100
}
462
+
463
+ /**
464
+ * Resolve splits for outbound edges of a node.
465
+ * - All declared: validate sum = 100
466
+ * - None declared: even distribution
467
+ * - Some declared: remainder distributed evenly among undeclared
468
+ */
469
+ function resolveSplits(
470
+ outbound: ParsedInfra['edges'],
471
+ diagnostics: InfraDiagnostic[],
472
+ ): ResolvedSplit[] {
473
+ if (outbound.length === 0) return [];
474
+ if (outbound.length === 1) {
475
+ return [{ edge: outbound[0], split: 100 }];
476
+ }
477
+
478
+ const declared = outbound.filter((e) => e.split !== null);
479
+ const undeclared = outbound.filter((e) => e.split === null);
480
+
481
+ if (declared.length === outbound.length) {
482
+ // All declared — validate sum
483
+ const sum = declared.reduce((s, e) => s + (e.split ?? 0), 0);
484
+ if (Math.abs(sum - 100) > 0.01) {
485
+ diagnostics.push({
486
+ type: 'SPLIT_SUM',
487
+ line: declared[0].lineNumber,
488
+ message: `Split percentages sum to ${sum}%, expected 100%.`,
489
+ });
490
+ }
491
+ return declared.map((e) => ({ edge: e, split: e.split! }));
492
+ }
493
+
494
+ if (declared.length === 0) {
495
+ // None declared — even distribution
496
+ const even = 100 / outbound.length;
497
+ return outbound.map((e) => ({ edge: e, split: even }));
498
+ }
499
+
500
+ // Some declared, some not — distribute remainder
501
+ const declaredSum = declared.reduce((s, e) => s + (e.split ?? 0), 0);
502
+ const remainder = 100 - declaredSum;
503
+
504
+ if (remainder < 0) {
505
+ diagnostics.push({
506
+ type: 'SPLIT_SUM',
507
+ line: declared[0].lineNumber,
508
+ message: `Declared splits sum to ${declaredSum}%, exceeding 100%.`,
509
+ });
510
+ }
511
+
512
+ const evenRemainder = undeclared.length > 0 ? remainder / undeclared.length : 0;
513
+
514
+ return outbound.map((e) => ({
515
+ edge: e,
516
+ split: e.split !== null ? e.split : Math.max(0, evenRemainder),
517
+ }));
518
+ }
519
+
520
+ // ============================================================
521
+ // Main Computation
522
+ // ============================================================
523
+
524
+ export function computeInfra(
525
+ parsed: ParsedInfra,
526
+ params: InfraComputeParams = {},
527
+ ): ComputedInfraModel {
528
+ const diagnostics: InfraDiagnostic[] = [];
529
+
530
+ // Chart-level defaults for latency and uptime
531
+ const defaultLatencyMs = parseFloat(parsed.options['default-latency-ms'] ?? '') || 0;
532
+ const defaultUptime = parseFloat(parsed.options['default-uptime'] ?? '') || 100;
533
+
534
+ // Apply scenario overrides (shallow clone nodes with modified properties)
535
+ let effectiveNodes = parsed.nodes;
536
+ if (params.scenario) {
537
+ const overrides = params.scenario.overrides;
538
+ effectiveNodes = parsed.nodes.map((node) => {
539
+ const nodeOverrides = overrides[node.id];
540
+ if (!nodeOverrides) return node;
541
+ const props = node.properties.map((p) => {
542
+ const ov = nodeOverrides[p.key];
543
+ return ov != null ? { ...p, value: ov } : p;
544
+ });
545
+ // Add new properties from scenario that don't exist on the node
546
+ for (const [key, val] of Object.entries(nodeOverrides)) {
547
+ if (!props.some((p) => p.key === key)) {
548
+ props.push({ key, value: val, lineNumber: node.lineNumber });
549
+ }
550
+ }
551
+ return { ...node, properties: props };
552
+ });
553
+ }
554
+ // Apply per-node property overrides (from interactive sliders)
555
+ // These take precedence over scenario overrides
556
+ if (params.propertyOverrides) {
557
+ const propOv = params.propertyOverrides;
558
+ effectiveNodes = effectiveNodes.map((node) => {
559
+ const nodeOv = propOv[node.id];
560
+ if (!nodeOv) return node;
561
+ const props = node.properties.map((p) => {
562
+ const ov = nodeOv[p.key];
563
+ return ov != null ? { ...p, value: ov } : p;
564
+ });
565
+ // Add new properties from overrides that don't exist on the node
566
+ for (const [key, val] of Object.entries(nodeOv)) {
567
+ if (!props.some((p) => p.key === key)) {
568
+ props.push({ key, value: val, lineNumber: node.lineNumber });
569
+ }
570
+ }
571
+ return { ...node, properties: props };
572
+ });
573
+ }
574
+
575
+ let effectiveParsed = effectiveNodes === parsed.nodes ? parsed : { ...parsed, nodes: effectiveNodes };
576
+
577
+ // ── Collapse groups into virtual nodes ──
578
+ const collapsedGroups = params.collapsedGroups ?? new Set(
579
+ parsed.groups.filter((g) => g.collapsed).map((g) => g.id)
580
+ );
581
+ let collapseChildCaps = new Map<string, number[]>();
582
+ let collapseGroupInst = new Map<string, number>();
583
+ if (collapsedGroups.size > 0) {
584
+ const cr = collapseGroups(effectiveParsed, collapsedGroups, defaultLatencyMs, defaultUptime);
585
+ effectiveParsed = cr.parsed;
586
+ collapseChildCaps = cr.childCapacities;
587
+ collapseGroupInst = cr.groupInstances;
588
+ }
589
+
590
+ // Build lookup maps
591
+ const nodeMap = new Map<string, InfraNode>();
592
+ for (const node of effectiveParsed.nodes) {
593
+ nodeMap.set(node.id, node);
594
+ }
595
+
596
+ // Group edges by source for resolving edges that target groups
597
+ // If an edge targets a [Group], resolve to the first child node in that group
598
+ const groupChildMap = new Map<string, string[]>();
599
+ for (const node of effectiveParsed.nodes) {
600
+ if (node.groupId) {
601
+ const children = groupChildMap.get(node.groupId) ?? [];
602
+ children.push(node.id);
603
+ groupChildMap.set(node.groupId, children);
604
+ }
605
+ }
606
+
607
+ // Build group instance multiplier: nodeId → parent group's instances
608
+ // When a group has `instances: N`, each child's effective capacity is multiplied by N
609
+ const groupInstMultiplier = new Map<string, number>();
610
+ for (const group of effectiveParsed.groups) {
611
+ const gi = typeof group.instances === 'number' ? group.instances :
612
+ typeof group.instances === 'string' ? parseInt(group.instances, 10) || 1 : 1;
613
+ if (gi > 1) {
614
+ const children = groupChildMap.get(group.id) ?? [];
615
+ for (const childId of children) {
616
+ groupInstMultiplier.set(childId, gi);
617
+ }
618
+ }
619
+ }
620
+
621
+ // Build outbound edge map (sourceId -> edges[])
622
+ const outboundMap = new Map<string, typeof effectiveParsed.edges>();
623
+ for (const edge of effectiveParsed.edges) {
624
+ const list = outboundMap.get(edge.sourceId) ?? [];
625
+ list.push(edge);
626
+ outboundMap.set(edge.sourceId, list);
627
+ }
628
+
629
+ // Computed rps per node
630
+ const computedRps = new Map<string, number>();
631
+ const computedEdgeRps = new Map<string, number>(); // key: `sourceId->targetId`
632
+ // Latency: cumulative ms from edge to each node (max across incoming paths)
633
+ const computedLatency = new Map<string, number>();
634
+ // Uptime: product of uptimes along the path (min across incoming paths)
635
+ const computedUptime = new Map<string, number>();
636
+ // Per-node local availability (computed after RPS is known)
637
+ const localAvailability = new Map<string, number>();
638
+
639
+ // Find edge entry point
640
+ const edgeNode = effectiveParsed.nodes.find((n) => n.isEdge);
641
+ const baseRps = params.rps ?? (edgeNode ? getNumProp(edgeNode, 'rps', 0) : 0);
642
+
643
+ // BFS traversal from edge
644
+ if (edgeNode) {
645
+ computedRps.set(edgeNode.id, baseRps);
646
+ computedLatency.set(edgeNode.id, 0);
647
+ computedUptime.set(edgeNode.id, 1);
648
+
649
+ const queue: string[] = [edgeNode.id];
650
+ const visited = new Set<string>();
651
+
652
+ while (queue.length > 0) {
653
+ const currentId = queue.shift()!;
654
+ if (visited.has(currentId)) continue;
655
+ visited.add(currentId);
656
+
657
+ const currentNode = nodeMap.get(currentId);
658
+ if (!currentNode) continue;
659
+
660
+ const inbound = computedRps.get(currentId) ?? 0;
661
+ const currentLatency = computedLatency.get(currentId) ?? 0;
662
+ const currentUptime = computedUptime.get(currentId) ?? 1;
663
+
664
+ // Add this node's latency and uptime
665
+ // Serverless nodes use duration-ms as their latency contribution
666
+ const nodeLatency = currentNode.isEdge ? 0
667
+ : isServerless(currentNode) ? getNumProp(currentNode, 'duration-ms', 100)
668
+ : getNumProp(currentNode, 'latency-ms', defaultLatencyMs);
669
+ const nodeUptime = currentNode.isEdge ? 1 : getNumProp(currentNode, 'uptime', defaultUptime) / 100;
670
+ const cumulativeLatency = currentLatency + nodeLatency;
671
+ const cumulativeUptime = currentUptime * nodeUptime;
672
+
673
+ // Apply behavior transformations to get outbound rps
674
+ const gMul = groupInstMultiplier.get(currentId) ?? 1;
675
+ const outboundRps = currentNode.isEdge ? inbound : applyBehaviors(currentNode, inbound, gMul);
676
+
677
+ // Queue latency boundary: downstream latency starts from queue wait time, not cumulative
678
+ let downstreamLatency = cumulativeLatency;
679
+ if (isQueue(currentNode)) {
680
+ const drainRate = getNumProp(currentNode, 'drain-rate', 0) * gMul;
681
+ const fillRate = drainRate > 0 ? Math.max(0, inbound - drainRate) : 0;
682
+ const waitTimeMs = fillRate > 0 && drainRate > 0 ? (fillRate / drainRate) * 1000 : 0;
683
+ downstreamLatency = waitTimeMs; // reset: consumer-side starts fresh from queue wait
684
+ }
685
+
686
+ // Get outbound edges
687
+ const outbound = outboundMap.get(currentId) ?? [];
688
+ if (outbound.length === 0) continue;
689
+
690
+ // Resolve splits
691
+ const resolved = resolveSplits(outbound, diagnostics);
692
+
693
+ for (const { edge, split } of resolved) {
694
+ const edgeRps = outboundRps * (split / 100);
695
+ const edgeKey = `${edge.sourceId}->${edge.targetId}`;
696
+ computedEdgeRps.set(edgeKey, edgeRps);
697
+
698
+ // Resolve target — could be a group or a node
699
+ let targetIds: string[];
700
+ const groupChildren = groupChildMap.get(edge.targetId);
701
+ if (groupChildren && groupChildren.length > 0) {
702
+ targetIds = groupChildren;
703
+ } else {
704
+ targetIds = [edge.targetId];
705
+ }
706
+
707
+ for (const targetId of targetIds) {
708
+ const perTarget = edgeRps / targetIds.length;
709
+ const existing = computedRps.get(targetId) ?? 0;
710
+ computedRps.set(targetId, existing + perTarget);
711
+
712
+ // Latency: take max across incoming paths (worst case)
713
+ const prevLatency = computedLatency.get(targetId) ?? 0;
714
+ if (downstreamLatency > prevLatency) {
715
+ computedLatency.set(targetId, downstreamLatency);
716
+ }
717
+
718
+ // Uptime: take min across incoming paths (most conservative)
719
+ const prevUptime = computedUptime.get(targetId) ?? 1;
720
+ if (cumulativeUptime < prevUptime) {
721
+ computedUptime.set(targetId, cumulativeUptime);
722
+ }
723
+
724
+ if (!visited.has(targetId)) {
725
+ queue.push(targetId);
726
+ }
727
+ }
728
+ }
729
+ }
730
+ }
731
+
732
+ // ── Per-node local & compound availability ──
733
+ // Local availability depends on inbound RPS, so computed after BFS.
734
+ // Compound availability = product of local availabilities from edge to this node.
735
+ const instanceOverrides = params.instanceOverrides ?? {};
736
+ for (const node of effectiveParsed.nodes) {
737
+ const rps = computedRps.get(node.id) ?? 0;
738
+ localAvailability.set(node.id, computeLocalAvailability(node, rps, instanceOverrides[node.id], groupInstMultiplier.get(node.id) ?? 1, defaultUptime));
739
+ }
740
+
741
+ // Propagate compound availability via second BFS
742
+ const compoundAvailability = new Map<string, number>();
743
+ if (edgeNode) {
744
+ compoundAvailability.set(edgeNode.id, localAvailability.get(edgeNode.id) ?? 1);
745
+ const queue2: string[] = [edgeNode.id];
746
+ const visited2 = new Set<string>();
747
+ while (queue2.length > 0) {
748
+ const currentId = queue2.shift()!;
749
+ if (visited2.has(currentId)) continue;
750
+ visited2.add(currentId);
751
+ const cumAvail = compoundAvailability.get(currentId) ?? 1;
752
+ const currentNode = nodeMap.get(currentId);
753
+ // Queue boundary: downstream availability starts fresh (producer-side decoupled)
754
+ const propagatedAvail = currentNode && isQueue(currentNode) ? localAvailability.get(currentId) ?? 1 : cumAvail;
755
+ const outbound = outboundMap.get(currentId) ?? [];
756
+ for (const edge of outbound) {
757
+ const groupChildren = groupChildMap.get(edge.targetId);
758
+ const targetIds = (groupChildren && groupChildren.length > 0) ? groupChildren : [edge.targetId];
759
+ for (const targetId of targetIds) {
760
+ const targetLocal = localAvailability.get(targetId) ?? 1;
761
+ const newCompound = propagatedAvail * targetLocal;
762
+ // Take min (most conservative) across incoming paths
763
+ const prev = compoundAvailability.get(targetId) ?? 1;
764
+ if (newCompound < prev) {
765
+ compoundAvailability.set(targetId, newCompound);
766
+ }
767
+ if (!visited2.has(targetId)) queue2.push(targetId);
768
+ }
769
+ }
770
+ }
771
+ }
772
+
773
+ // ── Per-node latency percentiles ──
774
+ // For each node, collect all downstream leaf paths (latency, weight) via DFS,
775
+ // then derive p50/p90/p99 weighted by traffic.
776
+
777
+ type LeafPath = { latency: number; uptime: number; availability: number; weight: number };
778
+ const nodeLeafCache = new Map<string, LeafPath[]>();
779
+
780
+ /** Collect leaf paths reachable from `nodeId` with accumulated extra latency/uptime. */
781
+ function collectLeafPaths(nodeId: string, visited: Set<string>): LeafPath[] {
782
+ if (nodeLeafCache.has(nodeId)) return nodeLeafCache.get(nodeId)!;
783
+ if (visited.has(nodeId)) return []; // cycle guard
784
+ visited.add(nodeId);
785
+
786
+ const node = nodeMap.get(nodeId);
787
+ if (!node) return [];
788
+
789
+ // Node latency contribution depends on type:
790
+ // - Queue: wait time (latency boundary — resets cumulative chain)
791
+ // - Serverless: duration-ms
792
+ // - Normal: latency-ms or default
793
+ let nodeLatency: number;
794
+ if (node.isEdge) {
795
+ nodeLatency = 0;
796
+ } else if (isQueue(node)) {
797
+ const qGMul = groupInstMultiplier.get(nodeId) ?? 1;
798
+ const drainRate = getNumProp(node, 'drain-rate', 0) * qGMul;
799
+ const inbound = computedRps.get(nodeId) ?? 0;
800
+ const fillRate = drainRate > 0 ? Math.max(0, inbound - drainRate) : 0;
801
+ nodeLatency = fillRate > 0 && drainRate > 0 ? (fillRate / drainRate) * 1000 : 0;
802
+ } else if (isServerless(node)) {
803
+ nodeLatency = getNumProp(node, 'duration-ms', 100);
804
+ } else {
805
+ nodeLatency = getNumProp(node, 'latency-ms', defaultLatencyMs);
806
+ }
807
+ const nodeUptimeFrac = node.isEdge ? 1 : getNumProp(node, 'uptime', defaultUptime) / 100;
808
+ const nodeAvail = localAvailability.get(nodeId) ?? 1;
809
+
810
+ // Serverless cold-start latency: split into warm (95%) and cold (5%) paths
811
+ const coldStartMs = isServerless(node) ? getNumProp(node, 'cold-start-ms', 0) : 0;
812
+ const coldLatency = nodeLatency + coldStartMs;
813
+
814
+ const outbound = outboundMap.get(nodeId) ?? [];
815
+ if (outbound.length === 0) {
816
+ // Leaf node — return self
817
+ const rps = computedRps.get(nodeId) ?? 0;
818
+ let result: LeafPath[];
819
+ if (rps <= 0) {
820
+ result = [];
821
+ } else if (coldStartMs > 0) {
822
+ // Split into warm/cold paths for percentile accuracy
823
+ // 95/5 split: cold starts affect ~5% of requests, visible at p99
824
+ result = [
825
+ { latency: nodeLatency, uptime: nodeUptimeFrac, availability: nodeAvail, weight: rps * 0.95 },
826
+ { latency: coldLatency, uptime: nodeUptimeFrac, availability: nodeAvail, weight: rps * 0.05 },
827
+ ];
828
+ } else {
829
+ result = [{ latency: nodeLatency, uptime: nodeUptimeFrac, availability: nodeAvail, weight: rps }];
830
+ }
831
+ nodeLeafCache.set(nodeId, result);
832
+ return result;
833
+ }
834
+
835
+ // Resolve splits for outbound edges
836
+ const resolved = resolveSplits(outbound, []);
837
+ const paths: LeafPath[] = [];
838
+
839
+ for (const { edge, split } of resolved) {
840
+ const groupChildren = groupChildMap.get(edge.targetId);
841
+ const targetIds = (groupChildren && groupChildren.length > 0)
842
+ ? groupChildren : [edge.targetId];
843
+
844
+ for (const targetId of targetIds) {
845
+ const childPaths = collectLeafPaths(targetId, new Set(visited));
846
+ for (const cp of childPaths) {
847
+ if (coldStartMs > 0) {
848
+ // Warm path (95% of requests)
849
+ paths.push({
850
+ latency: nodeLatency + cp.latency,
851
+ uptime: nodeUptimeFrac * cp.uptime,
852
+ availability: nodeAvail * cp.availability,
853
+ weight: cp.weight * (split / 100) / targetIds.length * 0.95,
854
+ });
855
+ // Cold path (5% of requests)
856
+ paths.push({
857
+ latency: coldLatency + cp.latency,
858
+ uptime: nodeUptimeFrac * cp.uptime,
859
+ availability: nodeAvail * cp.availability,
860
+ weight: cp.weight * (split / 100) / targetIds.length * 0.05,
861
+ });
862
+ } else {
863
+ paths.push({
864
+ latency: nodeLatency + cp.latency,
865
+ uptime: nodeUptimeFrac * cp.uptime,
866
+ availability: nodeAvail * cp.availability,
867
+ weight: cp.weight * (split / 100) / targetIds.length,
868
+ });
869
+ }
870
+ }
871
+ }
872
+ }
873
+
874
+ nodeLeafCache.set(nodeId, paths);
875
+ return paths;
876
+ }
877
+
878
+ /** Compute percentiles from weighted leaf paths. */
879
+ function percentilesFromPaths(paths: LeafPath[]): InfraLatencyPercentiles {
880
+ if (paths.length === 0) return { p50: 0, p90: 0, p99: 0 };
881
+ const sorted = [...paths].sort((a, b) => a.latency - b.latency);
882
+ const totalWeight = sorted.reduce((s, p) => s + p.weight, 0);
883
+ if (totalWeight <= 0) return { p50: 0, p90: 0, p99: 0 };
884
+
885
+ const getP = (pct: number): number => {
886
+ const target = totalWeight * (pct / 100);
887
+ let cumulative = 0;
888
+ for (const p of sorted) {
889
+ cumulative += p.weight;
890
+ if (cumulative >= target) return p.latency;
891
+ }
892
+ return sorted[sorted.length - 1].latency;
893
+ };
894
+
895
+ return { p50: getP(50), p90: getP(90), p99: getP(99) };
896
+ }
897
+
898
+ /** Compute availability percentiles from weighted leaf paths. */
899
+ function availabilityPercentilesFromPaths(paths: LeafPath[]): InfraAvailabilityPercentiles {
900
+ if (paths.length === 0) return { p50: 1, p90: 1, p99: 1 };
901
+ // Sort ascending by availability (worst paths first)
902
+ const sorted = [...paths].sort((a, b) => a.availability - b.availability);
903
+ const totalWeight = sorted.reduce((s, p) => s + p.weight, 0);
904
+ if (totalWeight <= 0) return { p50: 1, p90: 1, p99: 1 };
905
+
906
+ const getP = (pct: number): number => {
907
+ const target = totalWeight * (pct / 100);
908
+ let cumulative = 0;
909
+ for (const p of sorted) {
910
+ cumulative += p.weight;
911
+ if (cumulative >= target) return p.availability;
912
+ }
913
+ return sorted[sorted.length - 1].availability;
914
+ };
915
+
916
+ return { p50: getP(50), p90: getP(90), p99: getP(99) };
917
+ }
918
+
919
+ // Pre-compute leaf paths for all nodes
920
+ for (const node of effectiveParsed.nodes) {
921
+ collectLeafPaths(node.id, new Set());
922
+ }
923
+
924
+ // System-wide percentiles (from edge node)
925
+ const edgeLeafPaths = edgeNode ? (nodeLeafCache.get(edgeNode.id) ?? []) : [];
926
+ const edgeLatency = percentilesFromPaths(edgeLeafPaths);
927
+ const systemUptime = edgeLeafPaths.length > 0
928
+ ? (() => {
929
+ const tw = edgeLeafPaths.reduce((s, p) => s + p.weight, 0);
930
+ return tw > 0 ? edgeLeafPaths.reduce((s, p) => s + p.uptime * (p.weight / tw), 0) : 1;
931
+ })()
932
+ : 1;
933
+ const systemAvailability = edgeLeafPaths.length > 0
934
+ ? (() => {
935
+ const tw = edgeLeafPaths.reduce((s, p) => s + p.weight, 0);
936
+ return tw > 0 ? edgeLeafPaths.reduce((s, p) => s + p.availability * (p.weight / tw), 0) : 1;
937
+ })()
938
+ : 1;
939
+
940
+ // Per-node percentiles + downstream availability map
941
+ const nodePercentiles = new Map<string, InfraLatencyPercentiles>();
942
+ const nodeAvailPercentiles = new Map<string, InfraAvailabilityPercentiles>();
943
+ const downstreamAvailability = new Map<string, number>();
944
+ for (const node of effectiveParsed.nodes) {
945
+ const paths = nodeLeafCache.get(node.id) ?? [];
946
+ nodePercentiles.set(node.id, percentilesFromPaths(paths));
947
+ nodeAvailPercentiles.set(node.id, availabilityPercentilesFromPaths(paths));
948
+ // Queue nodes decouple availability — use local availability only
949
+ if (isQueue(node)) {
950
+ downstreamAvailability.set(node.id, localAvailability.get(node.id) ?? 1);
951
+ } else {
952
+ // Weighted average of compound availability across all downstream leaf paths
953
+ const tw = paths.reduce((s, p) => s + p.weight, 0);
954
+ downstreamAvailability.set(node.id, tw > 0
955
+ ? paths.reduce((s, p) => s + p.availability * (p.weight / tw), 0)
956
+ : localAvailability.get(node.id) ?? 1);
957
+ }
958
+ }
959
+
960
+ // Build computed nodes
961
+ const computedNodes: ComputedInfraNode[] = effectiveParsed.nodes.map((node) => {
962
+ const rps = computedRps.get(node.id) ?? 0;
963
+ const gMul = groupInstMultiplier.get(node.id) ?? 1;
964
+ let capacity: number;
965
+ let dynInstances: number;
966
+
967
+ if (isServerless(node)) {
968
+ capacity = serverlessCapacity(node);
969
+ dynInstances = 0; // serverless has no instances
970
+ } else {
971
+ const maxRps = getNumProp(node, 'max-rps', 0);
972
+ dynInstances = instanceOverrides[node.id] ?? computeDynamicInstances(node, rps);
973
+ capacity = maxRps > 0 ? maxRps * dynInstances * gMul : 0;
974
+ }
975
+ const overloaded = capacity > 0 && rps > capacity;
976
+
977
+ // Rate-limit check: is inbound RPS (after cache/fw/bot) exceeding ratelimit-rps?
978
+ let rateLimited = false;
979
+ if (!node.isEdge) {
980
+ const rl = getNumProp(node, 'ratelimit-rps', 0);
981
+ if (rl > 0 && rps > 0) {
982
+ let preRl = rps;
983
+ const ch = getNumProp(node, 'cache-hit', 0);
984
+ if (ch > 0) preRl *= (100 - ch) / 100;
985
+ const fw = getNumProp(node, 'firewall-block', 0);
986
+ if (fw > 0) preRl *= (100 - fw) / 100;
987
+ rateLimited = preRl > rl;
988
+ }
989
+ }
990
+
991
+ // Serverless nodes use duration-ms as their latency contribution
992
+ const nodeLatency = node.isEdge ? 0
993
+ : isServerless(node) ? getNumProp(node, 'duration-ms', 100)
994
+ : getNumProp(node, 'latency-ms', defaultLatencyMs);
995
+ const cumulativeLatency = (computedLatency.get(node.id) ?? 0) + nodeLatency;
996
+ const nodeUptime = node.isEdge ? 1 : getNumProp(node, 'uptime', defaultUptime) / 100;
997
+ const cumulativeUptime = (computedUptime.get(node.id) ?? 1) * nodeUptime;
998
+
999
+ const cbState = computeCbState(node, rps, cumulativeLatency, instanceOverrides[node.id], gMul);
1000
+
1001
+ if (overloaded) {
1002
+ const capDetail = isServerless(node)
1003
+ ? `concurrency ${getNumProp(node, 'concurrency', 0)} / ${getNumProp(node, 'duration-ms', 100)}ms`
1004
+ : `${getNumProp(node, 'max-rps', 0)} x ${dynInstances}${gMul > 1 ? ` x ${gMul} group` : ''}`;
1005
+ diagnostics.push({
1006
+ type: 'OVERLOAD',
1007
+ line: node.lineNumber,
1008
+ message: `${node.label} is overloaded: ${Math.round(rps)} rps exceeds capacity ${Math.round(capacity)} rps (${capDetail}).`,
1009
+ });
1010
+ }
1011
+ if (rateLimited) {
1012
+ diagnostics.push({
1013
+ type: 'RATE_LIMITED',
1014
+ line: node.lineNumber,
1015
+ message: `${node.label} is rate-limiting: inbound traffic exceeds ratelimit-rps.`,
1016
+ });
1017
+ }
1018
+
1019
+ // For collapsed group nodes: compute worst child health state
1020
+ let childHealthState: 'normal' | 'warning' | 'overloaded' | undefined;
1021
+ const childCaps = collapseChildCaps.get(node.id);
1022
+ if (childCaps && childCaps.length > 0) {
1023
+ const gInst = collapseGroupInst.get(node.id) ?? 1;
1024
+ const perInstanceRps = rps / gInst;
1025
+ let worst: 'normal' | 'warning' | 'overloaded' = 'normal';
1026
+ for (const cap of childCaps) {
1027
+ if (cap > 0 && perInstanceRps > cap) worst = 'overloaded';
1028
+ else if (cap > 0 && perInstanceRps > cap * 0.7 && worst !== 'overloaded') worst = 'warning';
1029
+ }
1030
+ childHealthState = worst;
1031
+ }
1032
+
1033
+ // Queue metrics — drain-rate scales with group instances, buffer does NOT
1034
+ let queueMetrics: ComputedInfraNode['queueMetrics'];
1035
+ if (isQueue(node)) {
1036
+ const buffer = getNumProp(node, 'buffer', 0);
1037
+ const drainRate = getNumProp(node, 'drain-rate', 0) * gMul;
1038
+ const fillRate = drainRate > 0 ? Math.max(0, rps - drainRate) : 0;
1039
+ const timeToOverflow = fillRate > 0 ? buffer / fillRate : Infinity;
1040
+ const waitTimeMs = fillRate > 0 && drainRate > 0 ? (fillRate / drainRate) * 1000 : 0;
1041
+ queueMetrics = { fillRate, timeToOverflow, waitTimeMs };
1042
+ }
1043
+
1044
+ return {
1045
+ id: node.id,
1046
+ label: node.label,
1047
+ groupId: node.groupId,
1048
+ isEdge: node.isEdge,
1049
+ computedRps: rps,
1050
+ overloaded,
1051
+ rateLimited,
1052
+ computedLatencyMs: cumulativeLatency,
1053
+ computedLatencyPercentiles: nodePercentiles.get(node.id) ?? { p50: 0, p90: 0, p99: 0 },
1054
+ computedUptime: cumulativeUptime,
1055
+ computedAvailability: downstreamAvailability.get(node.id) ?? 1,
1056
+ computedAvailabilityPercentiles: nodeAvailPercentiles.get(node.id) ?? { p50: 1, p90: 1, p99: 1 },
1057
+ computedCbState: cbState,
1058
+ computedInstances: (collapseGroupInst.get(node.id) ?? dynInstances) * gMul,
1059
+ computedConcurrentInvocations: isServerless(node)
1060
+ ? Math.ceil(rps * getNumProp(node, 'duration-ms', 100) / 1000)
1061
+ : 0,
1062
+ childHealthState,
1063
+ queueMetrics,
1064
+ properties: node.properties,
1065
+ tags: node.tags,
1066
+ lineNumber: node.lineNumber,
1067
+ };
1068
+ });
1069
+
1070
+ // Build computed edges with resolved splits and rps
1071
+ const computedEdges: ComputedInfraEdge[] = effectiveParsed.edges.map((edge) => {
1072
+ const edgeKey = `${edge.sourceId}->${edge.targetId}`;
1073
+ const rps = computedEdgeRps.get(edgeKey) ?? 0;
1074
+
1075
+ // Get resolved split
1076
+ const outbound = outboundMap.get(edge.sourceId) ?? [];
1077
+ let resolvedSplit = edge.split ?? 100;
1078
+ if (outbound.length > 1 && edge.split === null) {
1079
+ // Was inferred
1080
+ const declared = outbound.filter((e) => e.split !== null);
1081
+ if (declared.length === 0) {
1082
+ resolvedSplit = 100 / outbound.length;
1083
+ } else {
1084
+ const declaredSum = declared.reduce((s, e) => s + (e.split ?? 0), 0);
1085
+ const undeclared = outbound.filter((e) => e.split === null);
1086
+ resolvedSplit = undeclared.length > 0 ? (100 - declaredSum) / undeclared.length : 0;
1087
+ }
1088
+ }
1089
+
1090
+ return {
1091
+ sourceId: edge.sourceId,
1092
+ targetId: edge.targetId,
1093
+ label: edge.label,
1094
+ computedRps: rps,
1095
+ split: resolvedSplit,
1096
+ lineNumber: edge.lineNumber,
1097
+ };
1098
+ });
1099
+
1100
+ return {
1101
+ nodes: computedNodes,
1102
+ edges: computedEdges,
1103
+ groups: effectiveParsed.groups,
1104
+ tagGroups: effectiveParsed.tagGroups,
1105
+ title: effectiveParsed.title,
1106
+ direction: effectiveParsed.direction,
1107
+ edgeLatency,
1108
+ systemUptime,
1109
+ systemAvailability,
1110
+ options: parsed.options,
1111
+ diagnostics,
1112
+ };
1113
+ }