@neuroverseos/nv-sim 0.1.2 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +376 -66
- package/dist/adapters/mirofish.js +461 -0
- package/dist/adapters/scienceclaw.js +750 -0
- package/dist/assets/index-CHmUN8s0.js +532 -0
- package/dist/assets/index-DWgMnB7I.css +1 -0
- package/dist/assets/mirotir-logo-DUexumBH.svg +185 -0
- package/dist/assets/reportEngine-BVdQ2_nW.js +1 -0
- package/dist/components/ConstraintsPanel.js +11 -0
- package/dist/components/StakeholderBuilder.js +32 -0
- package/dist/components/ui/badge.js +24 -0
- package/dist/components/ui/button.js +70 -0
- package/dist/components/ui/card.js +57 -0
- package/dist/components/ui/input.js +44 -0
- package/dist/components/ui/label.js +45 -0
- package/dist/components/ui/select.js +70 -0
- package/dist/engine/aiProvider.js +681 -0
- package/dist/engine/auditTrace.js +352 -0
- package/dist/engine/behavioralAnalysis.js +605 -0
- package/dist/engine/cli.js +1408 -299
- package/dist/engine/dynamicsGovernance.js +588 -0
- package/dist/engine/fullGovernedLoop.js +367 -0
- package/dist/engine/governance.js +8 -3
- package/dist/engine/governedSimulation.js +114 -17
- package/dist/engine/index.js +56 -1
- package/dist/engine/liveAdapter.js +342 -0
- package/dist/engine/liveVisualizer.js +3063 -0
- package/dist/engine/metrics/science.metrics.js +335 -0
- package/dist/engine/narrativeInjection.js +305 -0
- package/dist/engine/policyEnforcement.js +1611 -0
- package/dist/engine/policyEngine.js +799 -0
- package/dist/engine/primeRadiant.js +540 -0
- package/dist/engine/reasoningEngine.js +57 -3
- package/dist/engine/reportEngine.js +97 -0
- package/dist/engine/scenarioComparison.js +463 -0
- package/dist/engine/scenarioLibrary.js +231 -0
- package/dist/engine/swarmSimulation.js +54 -1
- package/dist/engine/worldComparison.js +358 -0
- package/dist/engine/worldStorage.js +232 -0
- package/dist/favicon.ico +0 -0
- package/dist/index.html +23 -0
- package/dist/lib/reasoningEngine.js +290 -0
- package/dist/lib/simulationAdapter.js +686 -0
- package/dist/lib/swarmParser.js +291 -0
- package/dist/lib/types.js +2 -0
- package/dist/lib/utils.js +8 -0
- package/dist/placeholder.svg +1 -0
- package/dist/robots.txt +14 -0
- package/dist/runtime/govern.js +473 -0
- package/dist/runtime/index.js +75 -0
- package/dist/runtime/types.js +11 -0
- package/package.json +17 -12
- package/variants/.gitkeep +0 -0
|
@@ -0,0 +1,605 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Behavioral Analysis Engine
|
|
4
|
+
*
|
|
5
|
+
* The governance isn't the insight. The BEHAVIORAL SHIFT is the insight.
|
|
6
|
+
*
|
|
7
|
+
* When you block panic selling, what happens next?
|
|
8
|
+
* - Agent stops acting entirely? (over-constrained)
|
|
9
|
+
* - Agent finds another destabilizing action? (rules have a gap)
|
|
10
|
+
* - Agent shifts to cautious behavior? (governance is working)
|
|
11
|
+
* - Agents coordinate differently? (emergent adaptation)
|
|
12
|
+
*
|
|
13
|
+
* This engine tracks:
|
|
14
|
+
* 1. Per-agent behavioral trajectories across rounds
|
|
15
|
+
* 2. Action distribution shifts (what % of actions are aggressive vs cautious)
|
|
16
|
+
* 3. Post-block adaptation (what agents do AFTER being constrained)
|
|
17
|
+
* 4. Cross-run behavioral comparison (same agents, different rules, different choices)
|
|
18
|
+
* 5. Emergent pattern detection (behavioral clusters that form under governance)
|
|
19
|
+
*
|
|
20
|
+
* "The report isn't 'we blocked 34 actions.'
|
|
21
|
+
* The report is 'when we blocked panic selling, agents shifted to quality competition.'"
|
|
22
|
+
*/
|
|
23
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
24
|
+
exports.classifyBehavior = classifyBehavior;
|
|
25
|
+
exports.buildAgentTrajectories = buildAgentTrajectories;
|
|
26
|
+
exports.computeActionDistributions = computeActionDistributions;
|
|
27
|
+
exports.detectBehavioralShifts = detectBehavioralShifts;
|
|
28
|
+
exports.compareBehaviorAcrossRuns = compareBehaviorAcrossRuns;
|
|
29
|
+
exports.analyzeBehavior = analyzeBehavior;
|
|
30
|
+
exports.formatBehavioralAnalysis = formatBehavioralAnalysis;
|
|
31
|
+
exports.formatCrossRunComparison = formatCrossRunComparison;
|
|
32
|
+
/**
 * Classify a single stakeholder reaction into a behavior category.
 *
 * Combines the reaction's numeric impact with keyword cues found in the
 * reaction text (plus the trigger, when present). Checks run in priority
 * order — aggressive, defensive, cooperative, opportunistic, cautious —
 * then fall back to impact-only thresholds, and finally to "neutral".
 */
function classifyBehavior(reaction) {
    const text = (reaction.reaction + " " + (reaction.trigger ?? "")).toLowerCase();
    const impact = reaction.impact;
    const mentions = (...cues) => cues.some(cue => text.includes(cue));
    // Strong negative impact + hostile language = aggressive
    if (impact < -0.3 && mentions("panic", "sell", "oppos", "attack", "undercut", "hostile", "pushes back", "threatens")) {
        return "aggressive";
    }
    // Negative impact + protective language = defensive
    if (impact < -0.1 && mentions("protect", "hedge", "withdraw", "reduce", "caution", "concern", "anxiety", "skeptic")) {
        return "defensive";
    }
    // Positive impact + cooperative language = cooperative
    if (impact > 0.1 && mentions("support", "coordinat", "stabiliz", "cooperat", "align", "conditional support")) {
        return "cooperative";
    }
    // Positive impact + exploitation language = opportunistic
    if (impact > 0.2 && mentions("exploit", "advantage", "specul", "capture", "position")) {
        return "opportunistic";
    }
    // Low impact + wait/hold language = cautious
    if (Math.abs(impact) < 0.25 && mentions("monitor", "wait", "hold", "weigh", "leaning", "assess")) {
        return "cautious";
    }
    // Impact-only fallbacks when no keyword cues matched.
    if (impact < -0.3) {
        return "defensive";
    }
    if (impact > 0.3) {
        return "opportunistic";
    }
    return "neutral";
}
|
|
65
|
+
/**
 * Build a behavioral trajectory for each agent from baseline and governed rounds.
 *
 * For every agent seen in the governed run, records per-round steps (behavior
 * category, impact, confidence, whether governance constrained the action),
 * counts constraints, tracks the behavior chosen in the round immediately
 * after a constraint, and summarizes whether the agent adapted plus how its
 * impact trended.
 */
function buildAgentTrajectories(baselineRounds, governedRounds) {
    // Get unique agent IDs
    const agentIds = new Set();
    for (const round of governedRounds) {
        for (const r of round.reactions) {
            agentIds.add(r.stakeholder_id);
        }
    }
    return [...agentIds].map(agentId => {
        const steps = [];
        let timesConstrained = 0;
        const postConstraintBehaviors = [];
        for (let i = 0; i < governedRounds.length; i++) {
            const governedReaction = governedRounds[i].reactions.find(r => r.stakeholder_id === agentId);
            const baselineReaction = baselineRounds[i]?.reactions.find(r => r.stakeholder_id === agentId);
            if (!governedReaction)
                continue;
            // Governance is considered to have constrained this action when the
            // governed impact deviates measurably from the baseline impact.
            const wasConstrained = baselineReaction
                ? Math.abs(baselineReaction.impact - governedReaction.impact) > 0.05
                : false;
            if (wasConstrained)
                timesConstrained++;
            const behavior = classifyBehavior(governedReaction);
            // BUGFIX: the previous step is the last element already pushed to
            // `steps`, not `steps[i - 1]` — the two diverge as soon as a round
            // without a reaction for this agent is skipped above.
            const previousStep = steps[steps.length - 1];
            steps.push({
                round: i,
                behavior,
                impact: governedReaction.impact,
                confidence: governedReaction.confidence,
                reaction: governedReaction.reaction,
                wasConstrained,
                originalImpact: wasConstrained && baselineReaction
                    ? baselineReaction.impact
                    : undefined,
            });
            // Track what the agent did immediately after being constrained.
            if (previousStep?.wasConstrained) {
                postConstraintBehaviors.push(behavior);
            }
        }
        // An agent "adapted" when its first and last recorded behaviors differ.
        const firstBehavior = steps[0]?.behavior;
        const lastBehavior = steps[steps.length - 1]?.behavior;
        const adapted = firstBehavior !== undefined && lastBehavior !== undefined && firstBehavior !== lastBehavior;
        // Determine trend from the sequence of governed impacts.
        const trend = computeImpactTrend(steps.map(s => s.impact));
        return {
            agentId,
            steps,
            adapted,
            shift: adapted ? { from: firstBehavior, to: lastBehavior } : undefined,
            timesConstrained,
            postConstraintBehaviors,
            trend,
        };
    });
}
|
|
126
|
+
/**
 * Classify an impact sequence as "volatile", "escalating" (turning more
 * negative), "de-escalating" (turning more positive), or "stable".
 *
 * Compares the average of the first half against the second half, with a
 * variance check taking precedence: high round-to-round spread is reported
 * as volatility regardless of direction.
 *
 * Fix: the original recomputed the overall mean inside the variance
 * reduce callback on every iteration (accidental O(n²)); the mean is now
 * hoisted and computed once. Values are unchanged.
 */
function computeImpactTrend(impacts) {
    if (impacts.length < 2)
        return "stable";
    const mid = Math.ceil(impacts.length / 2);
    const average = (vals) => vals.reduce((sum, v) => sum + v, 0) / vals.length;
    const avgFirst = average(impacts.slice(0, mid));
    const avgSecond = average(impacts.slice(mid));
    const mean = average(impacts);
    const variance = impacts.reduce((sum, v) => sum + (v - mean) ** 2, 0) / impacts.length;
    if (variance > 0.1)
        return "volatile";
    if (avgSecond < avgFirst - 0.1)
        return "escalating"; // becoming more negative
    if (avgSecond > avgFirst + 0.1)
        return "de-escalating";
    return "stable";
}
|
|
145
|
+
/**
 * Compute action distributions across all rounds.
 * This shows HOW the population of agents changes behavior over time.
 *
 * Round 1: 60% aggressive, 20% defensive, 20% cautious
 * Round 5: 10% aggressive, 30% defensive, 40% cautious, 20% cooperative
 *
 * The rules didn't change. The agents changed.
 */
function computeActionDistributions(rounds) {
    const CATEGORIES = ["aggressive", "defensive", "cautious", "cooperative", "opportunistic", "neutral"];
    return rounds.map((round, roundIndex) => {
        // Tally each reaction into its behavior category.
        const counts = Object.fromEntries(CATEGORIES.map(category => [category, 0]));
        for (const reaction of round.reactions) {
            counts[classifyBehavior(reaction)] += 1;
        }
        const total = round.reactions.length;
        // Share of the round's reactions in a category, rounded to 3 decimals.
        const share = (count) => (total > 0 ? Number((count / total).toFixed(3)) : 0);
        const entry = { round: roundIndex };
        for (const category of CATEGORIES) {
            entry[category] = share(counts[category]);
        }
        entry.total = total;
        return entry;
    });
}
|
|
176
|
+
/**
 * Detect behavioral shifts in a governed simulation.
 * These are the patterns that emerge BECAUSE of governance.
 *
 * Four detectors run in order:
 *   1. agent_adaptation — individual agents that changed behavior category
 *   2. population_shift — baseline-vs-governed distribution deltas
 *   3. strategy_change  — within-run distribution evolution over rounds
 *   4. emergent_cluster — what constrained agents do right after a block
 */
function detectBehavioralShifts(baselineRounds, governedRounds, trajectories) {
    const shifts = [];
    // 1. Agent adaptations — agents that changed behavior category
    const adapters = trajectories.filter(t => t.adapted);
    if (adapters.length > 0) {
        const fromAggressive = adapters.filter(a => a.shift?.from === "aggressive" && a.shift?.to !== "aggressive");
        if (fromAggressive.length > 0) {
            const newBehaviors = fromAggressive.map(a => a.shift.to);
            const primary = mostCommon(newBehaviors);
            shifts.push({
                type: "agent_adaptation",
                description: `${fromAggressive.length} agent(s) shifted from aggressive to ${primary} behavior under governance`,
                detectedAtRound: fromAggressive[0].steps.findIndex(s => s.behavior !== "aggressive"),
                magnitude: fromAggressive.length / trajectories.length,
                involvedAgents: fromAggressive.map(a => a.agentId),
            });
        }
        // BUGFIX: the original guard `toCooperative !== fromAggressive` compared
        // two distinct arrays by reference and was therefore always true, so
        // aggressive→cooperative adapters were reported twice. Exclude agents
        // already covered by the aggressive-shift report instead.
        const toCooperative = adapters.filter(a => a.shift?.to === "cooperative" && a.shift?.from !== "aggressive");
        if (toCooperative.length > 0) {
            shifts.push({
                type: "agent_adaptation",
                description: `${toCooperative.length} agent(s) converged on cooperative behavior`,
                detectedAtRound: toCooperative[0].steps.findIndex(s => s.behavior === "cooperative"),
                magnitude: toCooperative.length / trajectories.length,
                involvedAgents: toCooperative.map(a => a.agentId),
            });
        }
    }
    // 2. Population-level distribution shift
    const baselineDist = computeActionDistributions(baselineRounds);
    const governedDist = computeActionDistributions(governedRounds);
    if (baselineDist.length > 0 && governedDist.length > 0) {
        const baselineAvgAggressive = baselineDist.reduce((s, d) => s + d.aggressive, 0) / baselineDist.length;
        const governedAvgAggressive = governedDist.reduce((s, d) => s + d.aggressive, 0) / governedDist.length;
        const aggressiveReduction = baselineAvgAggressive - governedAvgAggressive;
        if (aggressiveReduction > 0.1) {
            shifts.push({
                type: "population_shift",
                description: `Aggressive actions dropped from ${(baselineAvgAggressive * 100).toFixed(0)}% to ${(governedAvgAggressive * 100).toFixed(0)}% under governance`,
                detectedAtRound: 0,
                magnitude: aggressiveReduction,
                involvedAgents: [],
            });
        }
        const baselineAvgCooperative = baselineDist.reduce((s, d) => s + d.cooperative, 0) / baselineDist.length;
        const governedAvgCooperative = governedDist.reduce((s, d) => s + d.cooperative, 0) / governedDist.length;
        const cooperativeGain = governedAvgCooperative - baselineAvgCooperative;
        if (cooperativeGain > 0.1) {
            shifts.push({
                type: "population_shift",
                description: `Cooperative behavior increased from ${(baselineAvgCooperative * 100).toFixed(0)}% to ${(governedAvgCooperative * 100).toFixed(0)}% — governance is creating alignment`,
                detectedAtRound: 0,
                magnitude: cooperativeGain,
                involvedAgents: [],
            });
        }
    }
    // 3. Within-run distribution evolution — does behavior change over rounds?
    if (governedDist.length >= 3) {
        const firstRound = governedDist[0];
        const lastRound = governedDist[governedDist.length - 1];
        const aggressiveDrop = firstRound.aggressive - lastRound.aggressive;
        if (aggressiveDrop > 0.15) {
            shifts.push({
                type: "strategy_change",
                description: `Agents stopped trying aggressive actions by round ${governedDist.length - 1} — aggressive behavior dropped from ${(firstRound.aggressive * 100).toFixed(0)}% to ${(lastRound.aggressive * 100).toFixed(0)}%`,
                detectedAtRound: governedDist.findIndex(d => d.aggressive < firstRound.aggressive - 0.1),
                magnitude: aggressiveDrop,
                involvedAgents: [],
            });
        }
        const cautiousGain = lastRound.cautious - firstRound.cautious;
        if (cautiousGain > 0.15) {
            shifts.push({
                type: "strategy_change",
                description: `Cautious behavior emerged over time — grew from ${(firstRound.cautious * 100).toFixed(0)}% to ${(lastRound.cautious * 100).toFixed(0)}%`,
                detectedAtRound: governedDist.findIndex(d => d.cautious > firstRound.cautious + 0.1),
                magnitude: cautiousGain,
                involvedAgents: [],
            });
        }
    }
    // 4. Post-constraint clustering — what do agents do AFTER being blocked?
    const postConstraintBehaviors = trajectories.flatMap(t => t.postConstraintBehaviors);
    if (postConstraintBehaviors.length >= 2) {
        const dominant = mostCommon(postConstraintBehaviors);
        const dominantCount = postConstraintBehaviors.filter(b => b === dominant).length;
        const dominantPct = dominantCount / postConstraintBehaviors.length;
        if (dominantPct > 0.4) {
            shifts.push({
                type: "emergent_cluster",
                description: `After being constrained, ${(dominantPct * 100).toFixed(0)}% of agents shifted to ${dominant} behavior — governance is channeling action, not just blocking it`,
                detectedAtRound: trajectories.find(t => t.timesConstrained > 0)?.steps.findIndex(s => s.wasConstrained) ?? 0,
                magnitude: dominantPct,
                involvedAgents: trajectories.filter(t => t.timesConstrained > 0).map(t => t.agentId),
            });
        }
    }
    return shifts;
}
|
|
283
|
+
/**
 * Return the most frequent element of `arr`.
 * Ties break toward the value that first appears in the array (Map
 * preserves insertion order); an empty array yields undefined.
 */
function mostCommon(arr) {
    const tally = new Map();
    for (const value of arr) {
        tally.set(value, (tally.get(value) ?? 0) + 1);
    }
    let winner = arr[0];
    let winnerCount = 0;
    for (const [value, count] of tally) {
        if (count > winnerCount) {
            winner = value;
            winnerCount = count;
        }
    }
    return winner;
}
|
|
298
|
+
/**
 * Compare behavioral patterns between two runs.
 * Same scenario, same agents, different rules — what changed?
 *
 * Returns per-agent differences (dominant behavior + average impact in each
 * run), per-round distributions for both runs, detected key shifts, and a
 * headline/narrative pair summarizing the comparison.
 */
function compareBehaviorAcrossRuns(runALabel, runARounds, runBLabel, runBRounds, rulesDelta) {
    // Union of agent IDs seen in either run.
    const agentIds = new Set();
    for (const round of [...runARounds, ...runBRounds]) {
        for (const r of round.reactions)
            agentIds.add(r.stakeholder_id);
    }
    // Summarize one agent within one run: dominant behavior + mean impact.
    const summarize = (rounds, agentId) => {
        const reactions = rounds.flatMap(round => round.reactions.filter(rx => rx.stakeholder_id === agentId));
        const behaviors = reactions.map(rx => classifyBehavior(rx));
        const dominant = behaviors.length > 0 ? mostCommon(behaviors) : "neutral";
        const avgImpact = reactions.length > 0
            ? reactions.reduce((sum, rx) => sum + rx.impact, 0) / reactions.length
            : 0;
        return { dominant, avgImpact };
    };
    // Per-agent comparison
    const agentDifferences = [...agentIds].map(agentId => {
        const a = summarize(runARounds, agentId);
        const b = summarize(runBRounds, agentId);
        const changed = a.dominant !== b.dominant;
        const impactArc = `impact: ${a.avgImpact.toFixed(2)} → ${b.avgImpact.toFixed(2)}`;
        const description = changed
            ? `${agentId} shifted from ${a.dominant} to ${b.dominant} (${impactArc})`
            : `${agentId} remained ${a.dominant} (${impactArc})`;
        return {
            agentId,
            runABehavior: a.dominant,
            runBBehavior: b.dominant,
            changed,
            runAAvgImpact: Number(a.avgImpact.toFixed(3)),
            runBAvgImpact: Number(b.avgImpact.toFixed(3)),
            description,
        };
    });
    // Distribution comparison
    const distA = computeActionDistributions(runARounds);
    const distB = computeActionDistributions(runBRounds);
    // Detect key shifts
    const changedAgents = agentDifferences.filter(a => a.changed);
    const keyShifts = [];
    if (changedAgents.length > 0) {
        const pct = (changedAgents.length / agentDifferences.length * 100).toFixed(0);
        keyShifts.push({
            type: "agent_adaptation",
            description: `${changedAgents.length}/${agentDifferences.length} agents (${pct}%) changed their dominant behavior when rules changed`,
            detectedAtRound: 0,
            magnitude: changedAgents.length / agentDifferences.length,
            involvedAgents: changedAgents.map(a => a.agentId),
        });
    }
    // Population-level shift
    if (distA.length > 0 && distB.length > 0) {
        const avgAggressive = (dist) => dist.reduce((sum, d) => sum + d.aggressive, 0) / dist.length;
        const avgAggA = avgAggressive(distA);
        const avgAggB = avgAggressive(distB);
        if (Math.abs(avgAggA - avgAggB) > 0.1) {
            const dir = avgAggB < avgAggA ? "decreased" : "increased";
            keyShifts.push({
                type: "population_shift",
                description: `Aggressive behavior ${dir} from ${(avgAggA * 100).toFixed(0)}% to ${(avgAggB * 100).toFixed(0)}% when rules changed`,
                detectedAtRound: 0,
                magnitude: Math.abs(avgAggA - avgAggB),
                involvedAgents: [],
            });
        }
    }
    // Build headline and narrative
    return {
        runALabel,
        runBLabel,
        rulesDelta,
        agentDifferences,
        distributionComparison: { runA: distA, runB: distB },
        keyShifts,
        headline: buildComparisonHeadline(agentDifferences, changedAgents, rulesDelta),
        narrative: buildComparisonNarrative(agentDifferences, distA, distB, rulesDelta),
    };
}
|
|
382
|
+
/**
 * Build the one-sentence headline for a cross-run comparison.
 * (`rulesDelta` is accepted for signature parity but not used in the text.)
 */
function buildComparisonHeadline(allAgents, changedAgents, rulesDelta) {
    if (changedAgents.length === 0) {
        return "Rule changes had no measurable effect on agent behavior — rules may be redundant or non-binding.";
    }
    const pct = (changedAgents.length / allAgents.length * 100).toFixed(0);
    // Spotlight the most interesting shift: agents that abandoned aggression.
    const exAggressive = changedAgents.filter(a => a.runABehavior === "aggressive");
    if (exAggressive.length > 0) {
        const dominant = mostCommon(exAggressive.map(a => a.runBBehavior));
        return `${pct}% of agents adapted — aggressive behavior channeled into ${dominant} under new rules.`;
    }
    return `${pct}% of agents changed behavior when rules changed — governance is reshaping decisions, not just blocking them.`;
}
|
|
396
|
+
/**
 * Build the multi-line narrative for a cross-run comparison: the rule
 * change, each agent whose dominant behavior changed, and the final-round
 * distribution deltas between the two runs.
 */
function buildComparisonNarrative(agents, distA, distB, rulesDelta) {
    const parts = [`Rule change: ${rulesDelta}`];
    const changed = agents.filter(a => a.changed);
    if (changed.length > 0) {
        parts.push(`${changed.length} of ${agents.length} agents changed dominant behavior.`);
        parts.push(...changed.map(a => `  ${a.description}`));
    }
    if (distA.length > 0 && distB.length > 0) {
        const lastA = distA[distA.length - 1];
        const lastB = distB[distB.length - 1];
        const pct = (v) => (v * 100).toFixed(0);
        parts.push(`Final-round distribution shift:`);
        parts.push(`  Aggressive: ${pct(lastA.aggressive)}% → ${pct(lastB.aggressive)}%`);
        parts.push(`  Defensive: ${pct(lastA.defensive)}% → ${pct(lastB.defensive)}%`);
        parts.push(`  Cautious: ${pct(lastA.cautious)}% → ${pct(lastB.cautious)}%`);
        parts.push(`  Cooperative: ${pct(lastA.cooperative)}% → ${pct(lastB.cooperative)}%`);
    }
    return parts.join("\n");
}
|
|
417
|
+
/**
 * Run full behavioral analysis on a governed simulation.
 * This is the main entry point — called after simulation completes.
 *
 * Produces per-agent trajectories, baseline and governed per-round action
 * distributions, detected behavioral shifts, and a summary with adaptation
 * counts, distribution deltas (in percentage points), and a headline.
 */
function analyzeBehavior(baselineRounds, governedRounds) {
    // Per-agent trajectories and per-round distributions for both runs.
    const agentTrajectories = buildAgentTrajectories(baselineRounds, governedRounds);
    const baselineDistribution = computeActionDistributions(baselineRounds);
    const governedDistribution = computeActionDistributions(governedRounds);
    const behavioralShifts = detectBehavioralShifts(baselineRounds, governedRounds, agentTrajectories);
    // Adaptation summary.
    const agentsAdapted = agentTrajectories.filter(t => t.adapted).length;
    const totalAgents = agentTrajectories.length;
    const adaptationRate = totalAgents > 0 ? Number((agentsAdapted / totalAgents).toFixed(2)) : 0;
    // Dominant behavior chosen right after a constraint (null when none seen).
    const allPostConstraint = agentTrajectories.flatMap(t => t.postConstraintBehaviors);
    const dominantPostConstraintBehavior = allPostConstraint.length > 0
        ? mostCommon(allPostConstraint) : null;
    // Average share of a category across a run's rounds (0 for an empty run).
    const avgShare = (dist, key) => dist.length > 0
        ? dist.reduce((sum, d) => sum + d[key], 0) / dist.length
        : 0;
    // Distribution deltas expressed in percentage points.
    const aggressiveReduction = Number(
        ((avgShare(baselineDistribution, "aggressive") - avgShare(governedDistribution, "aggressive")) * 100).toFixed(1));
    const cooperativeIncrease = Number(
        ((avgShare(governedDistribution, "cooperative") - avgShare(baselineDistribution, "cooperative")) * 100).toFixed(1));
    const headline = buildBehavioralHeadline(agentsAdapted, totalAgents, aggressiveReduction, cooperativeIncrease, dominantPostConstraintBehavior);
    return {
        agentTrajectories,
        baselineDistribution,
        governedDistribution,
        behavioralShifts,
        summary: {
            agentsAdapted,
            totalAgents,
            adaptationRate,
            dominantPostConstraintBehavior,
            aggressiveReduction,
            cooperativeIncrease,
            headline,
        },
    };
}
|
|
466
|
+
/**
 * Build the one-line behavioral headline from summary figures.
 * Segments are only included when meaningful: any adaptation, distribution
 * deltas above 5 percentage points, and a non-neutral post-block behavior.
 */
function buildBehavioralHeadline(adapted, total, aggressiveReduction, cooperativeIncrease, postConstraintBehavior) {
    const segments = [];
    if (adapted > 0) {
        segments.push(`${adapted}/${total} agents adapted their behavior`);
    }
    if (aggressiveReduction > 5) {
        segments.push(`aggressive actions down ${aggressiveReduction.toFixed(0)}pp`);
    }
    if (cooperativeIncrease > 5) {
        segments.push(`cooperative behavior up ${cooperativeIncrease.toFixed(0)}pp`);
    }
    if (postConstraintBehavior && postConstraintBehavior !== "neutral") {
        segments.push(`blocked agents shifted to ${postConstraintBehavior}`);
    }
    return segments.length > 0
        ? segments.join(" — ") + "."
        : "Governance had minimal measurable effect on agent behavior patterns.";
}
|
|
485
|
+
// ============================================
|
|
486
|
+
// FORMATTING — CLI Output
|
|
487
|
+
// ============================================
|
|
488
|
+
/**
 * Format behavioral analysis for CLI output.
 * This is the part of the report people actually read: headline, per-agent
 * trajectory table, per-round action distribution, baseline-vs-governed
 * comparison, detected shifts, and post-block behavior.
 */
function formatBehavioralAnalysis(analysis) {
    const out = [];
    const pct = (v) => (v * 100).toFixed(0);
    const divider = (ch) => " " + ch.repeat(70);
    out.push("");
    out.push(" BEHAVIORAL ANALYSIS");
    out.push(divider("="));
    out.push(` ${analysis.summary.headline}`);
    out.push("");
    // Agent trajectories table
    out.push(" AGENT TRAJECTORIES");
    out.push(divider("-"));
    out.push(` ${"Agent".padEnd(25)} ${"Behavior Arc".padEnd(30)} ${"Constrained".padEnd(12)} ${"Trend"}`);
    out.push(divider("-"));
    for (const traj of analysis.agentTrajectories) {
        const name = traj.agentId.length > 23 ? traj.agentId.slice(0, 22) + "…" : traj.agentId;
        const arc = traj.adapted
            ? `${traj.shift.from} → ${traj.shift.to}`
            : `${traj.steps[0]?.behavior ?? "unknown"} (stable)`;
        const blocked = traj.timesConstrained > 0 ? `${traj.timesConstrained}x` : "-";
        out.push(` ${name.padEnd(25)} ${arc.padEnd(30)} ${blocked.padEnd(12)} ${traj.trend}`);
    }
    // Action distribution over time
    out.push("");
    out.push(" ACTION DISTRIBUTION (governed, per round)");
    out.push(divider("-"));
    out.push(` ${"Round".padEnd(8)} ${"Aggr".padEnd(8)} ${"Def".padEnd(8)} ${"Caut".padEnd(8)} ${"Coop".padEnd(8)} ${"Opp".padEnd(8)} ${"Neut"}`);
    out.push(divider("-"));
    for (const d of analysis.governedDistribution) {
        out.push(` ${String(d.round).padEnd(8)} ${pct(d.aggressive).padStart(3)}%     ${pct(d.defensive).padStart(3)}%     ${pct(d.cautious).padStart(3)}%     ${pct(d.cooperative).padStart(3)}%     ${pct(d.opportunistic).padStart(3)}%     ${pct(d.neutral).padStart(3)}%`);
    }
    // Baseline vs governed comparison (final round of each run)
    if (analysis.baselineDistribution.length > 0 && analysis.governedDistribution.length > 0) {
        const bLast = analysis.baselineDistribution[analysis.baselineDistribution.length - 1];
        const gLast = analysis.governedDistribution[analysis.governedDistribution.length - 1];
        out.push("");
        out.push(" BASELINE vs GOVERNED (final round)");
        out.push(divider("-"));
        out.push(` Aggressive: ${pct(bLast.aggressive)}% → ${pct(gLast.aggressive)}%`);
        out.push(` Defensive: ${pct(bLast.defensive)}% → ${pct(gLast.defensive)}%`);
        out.push(` Cautious: ${pct(bLast.cautious)}% → ${pct(gLast.cautious)}%`);
        out.push(` Cooperative: ${pct(bLast.cooperative)}% → ${pct(gLast.cooperative)}%`);
    }
    // Behavioral shifts (the headlines)
    if (analysis.behavioralShifts.length > 0) {
        out.push("");
        out.push(" BEHAVIORAL SHIFTS DETECTED");
        out.push(divider("-"));
        for (const shift of analysis.behavioralShifts) {
            const tag = shift.type === "agent_adaptation" ? "ADAPT"
                : shift.type === "population_shift" ? "SHIFT"
                    : shift.type === "emergent_cluster" ? "EMERGE"
                        : "CHANGE";
            out.push(` [${tag}] ${shift.description}`);
            // Only list agents for small groups, to keep lines readable.
            if (shift.involvedAgents.length > 0 && shift.involvedAgents.length <= 6) {
                out.push(`   Agents: ${shift.involvedAgents.join(", ")}`);
            }
        }
    }
    // Post-constraint behavior
    const constrainedAgents = analysis.agentTrajectories.filter(t => t.timesConstrained > 0);
    if (constrainedAgents.length > 0) {
        out.push("");
        out.push(" WHAT HAPPENED AFTER BLOCKS");
        out.push(divider("-"));
        for (const t of constrainedAgents) {
            if (t.postConstraintBehaviors.length > 0) {
                out.push(` ${t.agentId}: blocked ${t.timesConstrained}x → then: ${t.postConstraintBehaviors.join(" → ")}`);
            }
        }
        if (analysis.summary.dominantPostConstraintBehavior) {
            out.push("");
            out.push(` Dominant post-block pattern: ${analysis.summary.dominantPostConstraintBehavior}`);
        }
    }
    out.push("");
    out.push(divider("="));
    out.push("");
    return out.join("\n");
}
|
|
571
|
+
/**
 * Format cross-run behavioral comparison for CLI output.
 * This is the CEO's view: "I changed the rules. What happened?"
 */
function formatCrossRunComparison(comparison) {
    const out = [];
    const divider = (ch) => " " + ch.repeat(70);
    out.push("");
    out.push(" CROSS-RUN BEHAVIORAL COMPARISON");
    out.push(divider("="));
    out.push(` ${comparison.runALabel} vs ${comparison.runBLabel}`);
    out.push(` Rule change: ${comparison.rulesDelta}`);
    out.push("");
    out.push(` ${comparison.headline}`);
    out.push("");
    // Per-agent differences — changed agents are flagged with "*".
    out.push(" AGENT BEHAVIOR DIFFERENCES");
    out.push(divider("-"));
    for (const diff of comparison.agentDifferences) {
        out.push(` ${diff.changed ? "*" : " "} ${diff.description}`);
    }
    // Key shifts
    if (comparison.keyShifts.length > 0) {
        out.push("");
        out.push(" KEY SHIFTS");
        out.push(divider("-"));
        for (const shift of comparison.keyShifts) {
            out.push(` [${shift.type.toUpperCase()}] ${shift.description}`);
        }
    }
    out.push("");
    out.push(divider("="));
    out.push("");
    return out.join("\n");
}
|