@neuroverseos/nv-sim 0.1.4 → 0.1.7
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- package/README.md +260 -6
- package/dist/adapters/mirofish.js +461 -0
- package/dist/adapters/scienceclaw.js +750 -0
- package/dist/assets/index-CHmUN8s0.js +532 -0
- package/dist/assets/index-DWgMnB7I.css +1 -0
- package/dist/assets/{reportEngine-BfteK4MN.js → reportEngine-BVdQ2_nW.js} +1 -1
- package/dist/components/ConstraintsPanel.js +11 -0
- package/dist/components/StakeholderBuilder.js +32 -0
- package/dist/components/ui/badge.js +24 -0
- package/dist/components/ui/button.js +70 -0
- package/dist/components/ui/card.js +57 -0
- package/dist/components/ui/input.js +44 -0
- package/dist/components/ui/label.js +45 -0
- package/dist/components/ui/select.js +70 -0
- package/dist/engine/aiProvider.js +427 -2
- package/dist/engine/auditTrace.js +352 -0
- package/dist/engine/behavioralAnalysis.js +605 -0
- package/dist/engine/cli.js +1087 -13
- package/dist/engine/dynamicsGovernance.js +588 -0
- package/dist/engine/fullGovernedLoop.js +367 -0
- package/dist/engine/governedSimulation.js +77 -6
- package/dist/engine/index.js +41 -1
- package/dist/engine/liveVisualizer.js +1961 -197
- package/dist/engine/metrics/science.metrics.js +335 -0
- package/dist/engine/policyEnforcement.js +1611 -0
- package/dist/engine/policyEngine.js +799 -0
- package/dist/engine/primeRadiant.js +540 -0
- package/dist/engine/scenarioComparison.js +463 -0
- package/dist/engine/swarmSimulation.js +54 -1
- package/dist/engine/worldComparison.js +164 -0
- package/dist/engine/worldStorage.js +232 -0
- package/dist/index.html +2 -2
- package/dist/lib/reasoningEngine.js +290 -0
- package/dist/lib/simulationAdapter.js +686 -0
- package/dist/lib/swarmParser.js +291 -0
- package/dist/lib/types.js +2 -0
- package/dist/lib/utils.js +8 -0
- package/dist/runtime/govern.js +473 -0
- package/dist/runtime/index.js +75 -0
- package/dist/runtime/types.js +11 -0
- package/package.json +5 -2
- package/dist/assets/index-DHKd4rcV.js +0 -338
- package/dist/assets/index-SyyA3z3U.css +0 -1
- package/dist/assets/swarmSimulation-DHDqjfMa.js +0 -1
package/dist/lib/swarmParser.js
@@ -0,0 +1,291 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.parseSwarmOutput = parseSwarmOutput;
+const simulationAdapter_1 = require("./simulationAdapter");
+/**
+ * Extract agent names from normalized swarm data.
+ * Falls back to text extraction if normalization found nothing.
+ */
+function extractAgentsFromNormalized(normalized) {
+    if (normalized.agents.length > 0) {
+        return normalized.agents.slice(0, 8).map(a => a.id);
+    }
+    // Fallback: extract from events
+    const agentSet = new Set();
+    for (const event of normalized.events) {
+        if (event.agents) {
+            for (const a of event.agents)
+                agentSet.add(a);
+        }
+    }
+    if (agentSet.size > 0)
+        return Array.from(agentSet).slice(0, 8);
+    // Fallback: extract from metrics (infer agent types)
+    for (const m of normalized.metrics) {
+        const lower = m.name.toLowerCase();
+        if (/consumer|producer|trader|government|firm|citizen/.test(lower)) {
+            agentSet.add(m.name);
+        }
+    }
+    if (agentSet.size > 0)
+        return Array.from(agentSet).slice(0, 8);
+    return ['Alpha Agent', 'Beta Agent', 'Gamma Agent', 'Delta Agent', 'Epsilon Agent'];
+}
+function extractAgents(text) {
+    const normalized = (0, simulationAdapter_1.parseSimulation)(text);
+    return extractAgentsFromNormalized(normalized);
+}
+function hashText(text) {
+    let hash = 0;
+    for (let i = 0; i < text.length; i++) {
+        hash = ((hash << 5) - hash) + text.charCodeAt(i);
+        hash |= 0;
+    }
+    return Math.abs(hash);
+}
+function seededRandom(seed) {
+    let s = seed;
+    return () => {
+        s = (s * 16807 + 0) % 2147483647;
+        return s / 2147483647;
+    };
+}
+/**
+ * Parse simulation output into a SwarmResult.
+ *
+ * Uses the universal simulation adapter to normalize input from
+ * MiroFish, NetLogo, Mesa, AnyLogic/CSV, generic JSON, or freeform text,
+ * then generates structured reasoning overlay from the normalized data.
+ *
+ * Returns both the SwarmResult (for UI) and the detected format.
+ */
+function parseSwarmOutput(text) {
+    // Step 1: Normalize through universal adapter
+    const normalized = (0, simulationAdapter_1.parseSimulation)(text);
+    const format = normalized.sourceFormat;
+    // Step 2: Extract agents from normalized data
+    const agents = extractAgentsFromNormalized(normalized);
+    const seed = hashText(text);
+    const rand = seededRandom(seed);
+    // Step 3: Build patterns from normalized emergent behaviors + generation
+    const patterns = normalized.emergentBehaviors && normalized.emergentBehaviors.length > 0
+        ? normalized.emergentBehaviors.map((b, i) => ({
+            name: b.length > 40 ? b.slice(0, 40) + '...' : b,
+            description: b,
+            strength: ['strong', 'moderate', 'weak'][Math.min(i, 2)],
+            agents: agents.filter(() => rand() > 0.4),
+        }))
+        : generatePatterns(agents, rand);
+    // Step 4: Build agent strategies from normalized agent data + generation
+    const agentStrategies = normalized.agents.length > 0
+        ? normalized.agents.slice(0, 8).map(a => ({
+            agent: a.id,
+            strategy: a.action || a.state || 'Adaptive',
+            rationale: a.action
+                ? `Agent exhibits "${a.action}" behavior based on simulation data`
+                : 'Strategy inferred from simulation context',
+            effectiveness: Math.round(40 + rand() * 55),
+        }))
+        : generateAgentStrategies(agents, rand);
+    // Step 5: Build coalitions from normalized + generation
+    const coalitions = normalized.coalitions.length > 0
+        ? normalized.coalitions.map(c => ({
+            name: c.name,
+            members: c.members,
+            sharedObjective: c.objective || 'Coordinated strategy',
+            cohesion: c.strength ?? Math.round(50 + rand() * 45),
+        }))
+        : generateCoalitions(agents, rand);
+    const powerDynamics = generatePowerDynamics(agents, rand);
+    const equilibrium = generateEquilibrium(agents, rand);
+    // Step 6: Generate reasoning overlay
+    const paths = generateSwarmPaths(agents, rand);
+    const matrix = generateSwarmMatrix(agents, paths, rand);
+    const outcomes = generateSwarmOutcomes(rand);
+    const challenges = generateSwarmChallenges(agents, rand);
+    const recommendations = generateSwarmRecommendations(agents, paths, rand);
+    return {
+        patterns,
+        agentStrategies,
+        coalitions,
+        powerDynamics,
+        equilibrium,
+        paths,
+        matrix,
+        outcomes,
+        challenges,
+        recommendations,
+        detectedFormat: format,
+        formatLabel: simulationAdapter_1.FORMAT_LABELS[format],
+        normalized,
+    };
+}
+function generatePatterns(agents, rand) {
+    const patternTemplates = [
+        { name: 'Convergent Herding', description: 'Multiple agents independently converge on similar strategies, creating a dominant behavioral cluster.' },
+        { name: 'Oscillatory Competition', description: 'Agents cycle between cooperative and competitive strategies in a predictable pattern.' },
+        { name: 'Resource Clustering', description: 'Agents concentrate around high-value nodes, creating territorial dynamics.' },
+        { name: 'Cascade Adoption', description: 'Strategy adoption follows a cascade pattern — once critical mass is reached, remaining agents switch rapidly.' },
+        { name: 'Minority Exploitation', description: 'A small group of agents consistently outperforms by exploiting the majority strategy.' },
+        { name: 'Symmetry Breaking', description: 'Initially symmetric agents differentiate into specialized roles over time.' },
+        { name: 'Delayed Retaliation', description: 'Agents employ tit-for-tat with delay, creating cycles of cooperation and defection.' },
+    ];
+    const count = 3 + Math.floor(rand() * 3);
+    const strengths = ['strong', 'moderate', 'weak'];
+    return patternTemplates.slice(0, count).map(t => ({
+        ...t,
+        strength: strengths[Math.floor(rand() * 3)],
+        agents: agents.filter(() => rand() > 0.4),
+    }));
+}
+function generateAgentStrategies(agents, rand) {
+    const strategies = [
+        { strategy: 'Tit-for-Tat', rationale: 'Mirrors opponent behavior to incentivize cooperation while punishing defection.' },
+        { strategy: 'Always Cooperate', rationale: 'Maximizes collective welfare at the cost of individual exploitation risk.' },
+        { strategy: 'Pavlov (Win-Stay, Lose-Shift)', rationale: 'Repeats successful strategies and switches after failures — simple but adaptive.' },
+        { strategy: 'Grim Trigger', rationale: 'Cooperates until defected against, then permanently defects — extreme deterrence.' },
+        { strategy: 'Random Walk', rationale: 'Unpredictable strategy prevents exploitation but sacrifices optimization.' },
+        { strategy: 'Generous Tit-for-Tat', rationale: 'Occasionally forgives defection to escape mutual punishment cycles.' },
+        { strategy: 'Exploitative Defection', rationale: 'Systematically defects to extract maximum individual payoff from cooperators.' },
+        { strategy: 'Adaptive Threshold', rationale: 'Adjusts cooperation threshold based on environment — cooperates in friendly environments, defects in hostile ones.' },
+    ];
+    return agents.map(agent => {
+        const idx = Math.floor(rand() * strategies.length);
+        return {
+            agent,
+            strategy: strategies[idx].strategy,
+            rationale: strategies[idx].rationale,
+            effectiveness: Math.round(40 + rand() * 55),
+        };
+    });
+}
+function generateCoalitions(agents, rand) {
+    if (agents.length < 3)
+        return [];
+    const coalitions = [];
+    const objectives = [
+        'Resource maximization through coordinated strategy',
+        'Collective defense against exploitative agents',
+        'Market dominance through price coordination',
+        'Information sharing for competitive advantage',
+    ];
+    const shuffled = [...agents].sort(() => rand() - 0.5);
+    const split = Math.floor(shuffled.length / 2);
+    coalitions.push({
+        name: 'Primary Coalition',
+        members: shuffled.slice(0, split + 1),
+        sharedObjective: objectives[Math.floor(rand() * objectives.length)],
+        cohesion: Math.round(50 + rand() * 45),
+    });
+    if (agents.length > 4) {
+        coalitions.push({
+            name: 'Counter-Coalition',
+            members: shuffled.slice(split + 1),
+            sharedObjective: objectives[Math.floor(rand() * objectives.length)],
+            cohesion: Math.round(30 + rand() * 50),
+        });
+    }
+    return coalitions;
+}
+function generatePowerDynamics(agents, rand) {
+    const roles = ['driver', 'reactor', 'stabilizer', 'disruptor'];
+    const descriptions = {
+        driver: ['Sets the strategic direction; other agents respond to this agent\'s moves.', 'Primary initiator of action; shapes the competitive landscape.'],
+        reactor: ['Responds to others\' strategies rather than initiating; adaptive but not proactive.', 'Follows dominant trends; optimizes within established dynamics.'],
+        stabilizer: ['Acts as a moderating force; dampens oscillations and promotes equilibrium.', 'Consistently chooses cooperative strategies; anchors the system.'],
+        disruptor: ['Introduces unpredictability; breaks established patterns and forces adaptation.', 'High-variance strategy creates opportunities and threats for all agents.'],
+    };
+    return agents.map(agent => {
+        const role = roles[Math.floor(rand() * roles.length)];
+        const descs = descriptions[role];
+        return {
+            agent,
+            role,
+            influence: Math.round(20 + rand() * 75),
+            description: descs[Math.floor(rand() * descs.length)],
+        };
+    });
+}
+function generateEquilibrium(agents, rand) {
+    const types = [
+        { type: 'Nash Equilibrium', description: 'The system has reached a state where no agent can unilaterally improve their outcome by changing strategy. This equilibrium is self-enforcing but may not be globally optimal.' },
+        { type: 'Pareto Suboptimal Equilibrium', description: 'Agents are locked in a stable but collectively suboptimal state. A coordinated deviation could improve all agents\' outcomes, but individual incentives prevent it.' },
+        { type: 'Cyclical Dynamics', description: 'No stable equilibrium detected. The system oscillates between states as agents continuously adapt to each other\'s strategies.' },
+        { type: 'Mixed Strategy Equilibrium', description: 'Agents randomize between strategies with specific probabilities. The equilibrium is maintained through uncertainty rather than fixed strategies.' },
+    ];
+    const selected = types[Math.floor(rand() * types.length)];
+    const isStable = rand() > 0.4;
+    return {
+        isStable,
+        type: selected.type,
+        description: selected.description,
+        stabilityScore: Math.round(isStable ? 55 + rand() * 40 : 15 + rand() * 35),
+        vulnerabilities: [
+            'External shock could push agents to reassess strategies',
+            agents.length > 3 ? `Coalition between ${agents[0]} and ${agents[1]} could destabilize current equilibrium` : 'Small agent count increases sensitivity to individual strategy changes',
+            'Information asymmetry may be masking true strategic preferences',
+        ].slice(0, 2 + Math.floor(rand() * 2)),
+    };
+}
+function generateSwarmPaths(agents, rand) {
+    const pathOptions = [
+        { name: 'Coalition Reinforcement', description: 'Strengthen the dominant coalition by aligning incentives and reducing defection risk among members.' },
+        { name: 'Disruptor Containment', description: 'Isolate disruptive agents through coordinated exclusion or counter-strategies.' },
+        { name: 'Equilibrium Shift', description: 'Introduce strategic perturbation to move the system toward a more favorable equilibrium state.' },
+        { name: 'Cooperation Incentive Design', description: 'Restructure payoff matrices to make cooperation individually rational, not just collectively optimal.' },
+        { name: 'Adaptive Monitoring', description: 'Implement continuous strategy monitoring and adjust response in real-time based on agent behavior changes.' },
+    ];
+    let totalProb = 0;
+    const paths = pathOptions.map((p, i) => {
+        const prob = Math.round(15 + rand() * 30);
+        totalProb += prob;
+        return {
+            ...p,
+            probability: prob,
+            riskLevel: (rand() > 0.6 ? 'high' : rand() > 0.3 ? 'medium' : 'low'),
+            tradeoffs: [
+                `May alienate ${agents[Math.floor(rand() * agents.length)]}`,
+                'Requires sustained coordination to maintain effectiveness',
+                'Outcome depends on accurate agent behavior modeling',
+            ].slice(0, 2 + Math.floor(rand() * 2)),
+            recommended: i === 0,
+        };
+    });
+    paths.forEach(p => { p.probability = Math.round((p.probability / totalProb) * 100); });
+    return paths;
+}
+function generateSwarmMatrix(agents, paths, rand) {
+    const strategies = paths.slice(0, 3).map(p => p.name);
+    const reactions = agents.map(() => strategies.map(() => {
+        const r = rand();
+        return r > 0.55 ? 'positive' : r > 0.25 ? 'neutral' : 'negative';
+    }));
+    return { strategies, stakeholders: agents, reactions };
+}
+function generateSwarmOutcomes(rand) {
+    return [
+        { label: 'Best Case', type: 'best', conditions: 'All agents converge on cooperative equilibrium', description: 'System achieves globally optimal state; all agents benefit from sustained cooperation.', likelihood: Math.round(10 + rand() * 15), impactRating: Math.round(2 + rand() * 2) },
+        { label: 'Most Likely', type: 'likely', conditions: 'Partial cooperation with persistent free-riding', description: 'Dominant coalition maintains advantage while fringe agents exploit cooperative norms.', likelihood: Math.round(35 + rand() * 20), impactRating: Math.round(4 + rand() * 2) },
+        { label: 'Worst Case', type: 'worst', conditions: 'Coalition fracture and competitive spiral', description: 'Trust breakdown triggers mutual defection; system degrades to non-cooperative equilibrium.', likelihood: Math.round(10 + rand() * 15), impactRating: Math.round(7 + rand() * 2) },
+        { label: 'Tail Risk', type: 'tail', conditions: 'Emergent adversarial strategy defeats all known counters', description: 'Novel agent strategy exploits systemic vulnerability; cascading failures across the agent network.', likelihood: Math.round(2 + rand() * 8), impactRating: Math.round(9 + rand()) },
    ];
+}
+function generateSwarmChallenges(agents, rand) {
+    const challenges = [
+        { assumption: 'Agents behave rationally', whyItMightFail: 'Bounded rationality, information delays, and emergent complexity produce behaviors that deviate from game-theoretic optimality.', whatChanges: 'Predictive models may fail; heuristic-based analysis becomes necessary.', severity: 'high' },
+        { assumption: 'Agent strategies are observable', whyItMightFail: 'Agents may employ deceptive signaling, masking true strategies behind observable actions.', whatChanges: 'Strategy inference requires deeper behavioral analysis; surface patterns may mislead.', severity: 'medium' },
+        { assumption: 'The environment is stationary', whyItMightFail: 'Payoff structures and constraints may shift during the simulation, invalidating equilibrium analysis.', whatChanges: 'Continuous recalibration needed; static equilibrium analysis insufficient.', severity: 'high' },
+        { assumption: `${agents[0]} will maintain current strategy`, whyItMightFail: 'Agents adapt over time; current strategy may be transitional rather than stable.', whatChanges: 'Must model strategy evolution, not just current state.', severity: 'medium' },
+        { assumption: 'Coalition structures are stable', whyItMightFail: 'Internal incentive misalignment may cause coalition fracture under pressure.', whatChanges: 'Coalition durability should be stress-tested; contingency plans for fracture needed.', severity: 'critical' },
+    ];
+    return challenges.slice(0, 3 + Math.floor(rand() * 3));
+}
+function generateSwarmRecommendations(agents, paths, rand) {
+    const rec = paths.find(p => p.recommended) || paths[0];
+    return [
+        { priority: 1, action: `Implement ${rec.name}`, rationale: `Highest-probability intervention (${rec.probability}%) for steering swarm behavior.`, timeframe: 'Immediate' },
+        { priority: 2, action: 'Monitor coalition stability metrics', rationale: 'Early detection of coalition fracture enables preemptive intervention.', timeframe: 'Continuous' },
+        { priority: 3, action: `Address ${agents[agents.length - 1]} behavior pattern`, rationale: 'Outlier agent behavior may indicate either exploitation or innovation; requires assessment.', timeframe: 'Within next cycle' },
+        { priority: 4, action: 'Run sensitivity analysis on payoff structure', rationale: 'Small payoff changes may trigger large behavioral shifts; identify critical thresholds.', timeframe: 'Before next iteration' },
+    ];
+}
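
A minimal usage sketch for the new parser, assuming the compiled module can be required directly from dist (the deep-import path and the sample log below are illustrative, not taken from the package docs). Note that the reasoning overlay is driven by a PRNG seeded from a hash of the input text, so the same input always yields the same patterns, strategies, and recommendations.

// Hypothetical usage; the import path and sample input are assumptions.
const { parseSwarmOutput } = require('@neuroverseos/nv-sim/dist/lib/swarmParser');

const log = 'tick 1: Producer raises price\ntick 2: Consumer defects'; // any captured simulation output
const result = parseSwarmOutput(log);

console.log(result.detectedFormat, result.formatLabel); // format detected by the universal adapter
console.log(result.agentStrategies.map(s => `${s.agent}: ${s.strategy} (${s.effectiveness}%)`));
console.log(result.equilibrium.type, result.recommendations[0].action);
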
package/dist/lib/utils.js
@@ -0,0 +1,8 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.cn = cn;
+const clsx_1 = require("clsx");
+const tailwind_merge_1 = require("tailwind-merge");
+function cn(...inputs) {
+    return (0, tailwind_merge_1.twMerge)((0, clsx_1.clsx)(inputs));
+}
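
The cn helper is the usual clsx + tailwind-merge combinator: clsx flattens conditional class inputs and twMerge resolves conflicting Tailwind utilities in favor of the last one. A small illustrative call (class names made up, import path assumed):

// Assumed deep-import path, for illustration only.
const { cn } = require('@neuroverseos/nv-sim/dist/lib/utils');

cn('p-2 text-sm', { 'font-bold': true }, 'p-4');
// -> "text-sm font-bold p-4" (the later p-4 wins over p-2)
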