@neuroverseos/nv-sim 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +73 -0
- package/dist/engine/analyzer.js +651 -0
- package/dist/engine/api.js +208 -0
- package/dist/engine/chaosEngine.js +292 -0
- package/dist/engine/cli.js +803 -0
- package/dist/engine/goalEngine.js +559 -0
- package/dist/engine/governance.js +210 -0
- package/dist/engine/governedSimulation.js +529 -0
- package/dist/engine/index.js +82 -0
- package/dist/engine/mirofish.js +295 -0
- package/dist/engine/reasoningEngine.js +548 -0
- package/dist/engine/scenarioCapsule.js +351 -0
- package/dist/engine/swarmSimulation.js +244 -0
- package/dist/engine/types.js +15 -0
- package/dist/engine/worldBridge.js +481 -0
- package/dist/package.json +1 -0
- package/package.json +110 -0
|
@@ -0,0 +1,559 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Goal-Directed Strategy Engine
|
|
4
|
+
*
|
|
5
|
+
* "Find the path through the swarm."
|
|
6
|
+
*
|
|
7
|
+
* When a user specifies a desired outcome, this engine works BACKWARD
|
|
8
|
+
* from the goal to generate candidate strategies, then tests each one
|
|
9
|
+
* against the swarm simulation to rank feasibility.
|
|
10
|
+
*
|
|
11
|
+
* Two modes of Mirotir:
|
|
12
|
+
* EXPLORE: "What might happen?" → forward simulation → discovery
|
|
13
|
+
* GOAL: "How do I get here?" → backward reasoning → strategy design
|
|
14
|
+
*
|
|
15
|
+
* Architecture:
|
|
16
|
+
* goal state
|
|
17
|
+
* ↓
|
|
18
|
+
* infer required levers
|
|
19
|
+
* ↓
|
|
20
|
+
* generate candidate strategies
|
|
21
|
+
* ↓
|
|
22
|
+
* swarm tests each strategy
|
|
23
|
+
* ↓
|
|
24
|
+
* evaluate: which path reaches the goal?
|
|
25
|
+
* ↓
|
|
26
|
+
* rank paths by success probability
|
|
27
|
+
* ↓
|
|
28
|
+
* recommend the best path that respects red lines
|
|
29
|
+
*
|
|
30
|
+
* This is backcasting / goal-conditioned reasoning — used in military
|
|
31
|
+
* strategy, corporate planning, policy design, and game AI.
|
|
32
|
+
*/
|
|
33
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
34
|
+
exports.runGoalReasoning = runGoalReasoning;
|
|
35
|
+
const mirofish_1 = require("./mirofish");
|
|
36
|
+
// ============================================
|
|
37
|
+
// LEVER INFERENCE
|
|
38
|
+
// ============================================
|
|
39
|
+
/**
|
|
40
|
+
* Infer available levers from the scenario and stakeholders
|
|
41
|
+
* when the user hasn't specified them explicitly.
|
|
42
|
+
*/
|
|
43
|
+
/**
 * Infer available levers from the scenario text when the user hasn't
 * specified them explicitly.
 *
 * Keyword groups are matched against the lowercased scenario; every
 * matching group contributes its levers. A generic fallback set is
 * returned when nothing matches.
 *
 * Fix: removed the unused local `goalLower` (dead code that also threw
 * when `goal.outcome` was absent even though the value was never read).
 *
 * @param {string} scenario - Free-text scenario description.
 * @param {Array<object>} stakeholders - Stakeholder list (unused; kept for interface stability).
 * @param {object} goal - Goal specification (unused; kept for interface stability).
 * @returns {string[]} Candidate levers for this scenario.
 */
function inferLevers(scenario, stakeholders, goal) {
    const scenarioLower = scenario.toLowerCase();
    // Domain keyword groups → the levers that domain makes available.
    const LEVER_RULES = [
        {
            keywords: ["oil", "gas", "energy", "price"],
            levers: ["Strategic petroleum reserve release", "Fuel tax adjustment", "Refinery capacity incentives", "Alternative energy subsidies", "Import diversification"],
        },
        {
            keywords: ["strait", "military", "war", "conflict", "sanctions"],
            levers: ["Diplomatic engagement", "Coalition building", "Sanctions/trade policy", "Military posture adjustment", "Back-channel communication"],
        },
        {
            keywords: ["regulation", "law", "compliance", "policy"],
            levers: ["Lobbying and advocacy", "Compliance framework investment", "Industry coalition formation", "Public comment campaigns", "Alternative regulatory proposals"],
        },
        {
            keywords: ["breach", "crisis", "scandal", "reputation"],
            levers: ["Transparent disclosure", "Stakeholder communication plan", "Third-party audit/investigation", "Remediation program", "Leadership accountability"],
        },
        {
            keywords: ["market", "company", "business", "startup"],
            levers: ["Pricing strategy", "Partnership/acquisition", "Market positioning pivot", "Capital allocation", "Talent/team restructuring"],
        },
    ];
    const levers = [];
    for (const rule of LEVER_RULES) {
        if (rule.keywords.some((kw) => scenarioLower.includes(kw))) {
            levers.push(...rule.levers);
        }
    }
    // Generic levers if nothing specific matched.
    if (levers.length === 0) {
        levers.push("Stakeholder engagement", "Resource reallocation", "Communication strategy", "Policy change", "Coalition building");
    }
    return levers;
}
|
|
89
|
+
// ============================================
|
|
90
|
+
// STRATEGY GENERATION
|
|
91
|
+
// ============================================
|
|
92
|
+
/**
|
|
93
|
+
* Generate candidate strategies to reach the goal state.
|
|
94
|
+
*
|
|
95
|
+
* This is the core of goal-directed reasoning:
|
|
96
|
+
* Work backward from the desired outcome and identify paths.
|
|
97
|
+
*/
|
|
98
|
+
/**
 * Generate candidate strategies to reach the goal state.
 *
 * This is the core of goal-directed reasoning: work backward from the
 * desired outcome and identify paths (direct, coalition, incremental,
 * and — when enough levers exist — asymmetric).
 *
 * Fix: every strategy now carries both `likely_allies` and
 * `likely_opponents` arrays. Previously the incremental strategy
 * omitted `likely_opponents` and the asymmetric strategy omitted both,
 * so downstream consumers (e.g. testStrategyAgainstSwarm) read
 * `undefined` for those fields.
 *
 * @param {string} scenario - Scenario description (unused here; kept for interface stability).
 * @param {object} goal - Goal spec: outcome, priority, timeframe, red_lines.
 * @param {Array<object>} stakeholders - Stakeholders with `id` and `disposition`.
 * @param {string[]} levers - Available levers, ordered by assumed relevance.
 * @returns {Array<object>} 3-4 candidate strategies.
 */
function generateStrategies(scenario, goal, stakeholders, levers) {
    const strategies = [];
    const priority = goal.priority ?? "important";
    const redLines = goal.red_lines ?? [];
    // Stakeholder groupings reused across strategies.
    const supporterIds = stakeholders
        .filter((s) => s.disposition === "supportive")
        .map((s) => s.id);
    const opponentIds = stakeholders
        .filter((s) => s.disposition === "hostile")
        .map((s) => s.id);
    const allies = stakeholders.filter((s) => s.disposition === "supportive" || s.disposition === "neutral");
    // Strategy 1: Direct path (most levers, fastest)
    strategies.push({
        id: "strategy_direct",
        label: "Direct Path",
        description: `Aggressive, multi-lever approach to reach "${goal.outcome}" as quickly as possible. ` +
            "Uses the most available levers simultaneously for maximum force.",
        steps: buildDirectSteps(goal, levers),
        levers_used: levers.slice(0, 3),
        success_probability: priority === "critical" ? 0.55 : 0.45,
        estimated_timeframe: goal.timeframe ?? "30 days",
        risk: "high",
        red_line_violations: assessRedLineViolations("direct", redLines, levers.slice(0, 3)),
        dependencies: [
            "Multiple levers must be available simultaneously",
            "Stakeholders must not coordinate opposition",
            "No secondary crisis emerges during execution",
        ],
        failure_modes: [
            "Aggressive action triggers backlash coalition",
            "Resource overcommitment leaves no fallback",
            "Speed creates execution errors",
        ],
        likely_allies: supporterIds,
        likely_opponents: opponentIds,
    });
    // Strategy 2: Coalition path (build alliances first)
    strategies.push({
        id: "strategy_coalition",
        label: "Coalition Path",
        description: `Build a stakeholder coalition first, then apply coordinated pressure toward "${goal.outcome}". ` +
            "Slower but more sustainable — collective action is harder to resist.",
        steps: buildCoalitionSteps(goal, levers, allies),
        levers_used: ["Coalition building", ...levers.slice(0, 2)],
        success_probability: 0.6,
        estimated_timeframe: extendTimeframe(goal.timeframe ?? "30 days", 1.5),
        risk: "moderate",
        red_line_violations: assessRedLineViolations("coalition", redLines, ["Coalition building", ...levers.slice(0, 2)]),
        dependencies: [
            "Key neutral stakeholders are persuadable",
            "Coalition members maintain alignment throughout execution",
            "No external shock dissolves the coalition",
        ],
        failure_modes: [
            "Coalition fractures under pressure",
            "Opponents form counter-coalition faster",
            "Consensus-building delays miss the window",
        ],
        likely_allies: allies.map((s) => s.id),
        likely_opponents: opponentIds,
    });
    // Strategy 3: Incremental path (low risk, slow)
    strategies.push({
        id: "strategy_incremental",
        label: "Incremental Path",
        description: `Move toward "${goal.outcome}" through small, reversible steps. ` +
            "Each step is tested before committing further. Preserves optionality.",
        steps: buildIncrementalSteps(goal, levers),
        levers_used: levers.slice(0, 2),
        success_probability: 0.65,
        estimated_timeframe: extendTimeframe(goal.timeframe ?? "30 days", 2),
        risk: "low",
        red_line_violations: [],
        dependencies: [
            "Sufficient time to iterate",
            "Each step produces measurable feedback",
            "The goal remains viable throughout the process",
        ],
        failure_modes: [
            "Window of opportunity closes before full execution",
            "Incremental moves are individually too small to create momentum",
            "Competitors move faster with bolder strategies",
        ],
        likely_allies: stakeholders
            .filter((s) => s.disposition !== "hostile")
            .map((s) => s.id),
        // FIX: previously omitted — downstream code reads this field.
        likely_opponents: opponentIds,
    });
    // Strategy 4: Asymmetric path (find the non-obvious lever)
    if (levers.length >= 3) {
        const fulcrum = levers[levers.length - 1];
        strategies.push({
            id: "strategy_asymmetric",
            label: "Asymmetric Path",
            description: `Find the one lever that has outsized impact on achieving "${goal.outcome}". ` +
                "Instead of pushing many levers, find the fulcrum.",
            steps: buildAsymmetricSteps(goal, levers),
            levers_used: [fulcrum],
            success_probability: 0.35,
            estimated_timeframe: goal.timeframe ?? "30 days",
            risk: "moderate",
            red_line_violations: assessRedLineViolations("asymmetric", redLines, [fulcrum]),
            dependencies: [
                "The identified fulcrum lever actually has the projected influence",
                "Opponents don't anticipate this approach",
                "No defensive positions are pre-established against this lever",
            ],
            failure_modes: [
                "The fulcrum lever doesn't have the expected effect",
                "Narrow approach is easily countered once recognized",
                "Success depends on a single point of leverage that may not hold",
            ],
            // FIX: previously omitted — keep all strategies shape-consistent.
            likely_allies: supporterIds,
            likely_opponents: opponentIds,
        });
    }
    return strategies;
}
|
|
212
|
+
// ============================================
|
|
213
|
+
// STRATEGY STEP BUILDERS
|
|
214
|
+
// ============================================
|
|
215
|
+
/**
 * Build the step plan for the "Direct Path" strategy: deploy up to
 * three levers in quick succession, then evaluate and adjust.
 *
 * @param {object} goal - Goal spec; only `timeframe` is read (defaults to "30 days").
 * @param {string[]} levers - Available levers; the first three are scheduled.
 * @returns {Array<object>} Ordered steps (2-4 entries depending on lever count).
 */
function buildDirectSteps(goal, levers) {
    const horizon = goal.timeframe ?? "30 days";
    // Lever phases are included only when the corresponding lever exists.
    const phases = [
        {
            order: 1,
            action: "Deploy primary lever immediately",
            timeframe: "Day 1-3",
            lever: levers[0] ?? "Primary action",
            expected_effect: "Establish momentum and signal commitment to the goal",
            risk: "Early commitment before full information",
        },
        ...(levers.length >= 2
            ? [{
                order: 2,
                action: "Activate secondary lever to create pressure from a different angle",
                timeframe: "Day 3-7",
                lever: levers[1],
                expected_effect: "Create multi-directional pressure that's harder to resist",
                risk: "Resource strain from parallel execution",
            }]
            : []),
        ...(levers.length >= 3
            ? [{
                order: 3,
                action: "Apply tertiary lever to close remaining gaps",
                timeframe: "Day 7-14",
                lever: levers[2],
                expected_effect: "Address residual resistance and accelerate toward goal",
                risk: "Overextension if earlier levers haven't produced results",
            }]
            : []),
    ];
    // Closing phase always follows whatever lever phases were scheduled.
    phases.push({
        order: phases.length + 1,
        action: "Evaluate progress against success criteria and adjust",
        timeframe: `Day 14-${horizon}`,
        lever: "Assessment and adjustment",
        expected_effect: "Course correction based on actual results vs. projections",
        risk: "Sunk cost bias may prevent necessary pivots",
    });
    return phases;
}
|
|
256
|
+
/**
 * Build the step plan for the "Coalition Path" strategy: form a
 * coalition, align it, execute together, then consolidate gains.
 *
 * @param {object} goal - Goal spec; `outcome` is interpolated into step 3.
 * @param {string[]} levers - Available levers for the execution phases.
 * @param {Array<object>} allies - Persuadable stakeholders (with `id`); up to three are named in step 1.
 * @returns {Array<object>} Four ordered steps.
 */
function buildCoalitionSteps(goal, levers, allies) {
    // Name up to three concrete partners, or fall back to a generic label.
    const partnerNames = allies.slice(0, 3).map((a) => a.id).join(", ");
    const partnerLabel = allies.length > 0 ? partnerNames : "key neutral stakeholders";
    const formStep = {
        order: 1,
        action: `Build coalition with ${partnerLabel}`,
        timeframe: "Week 1-2",
        lever: "Coalition building",
        expected_effect: "Create collective leverage that exceeds individual power",
        risk: "Coalition formation takes time; window may narrow",
    };
    const alignStep = {
        order: 2,
        action: "Align coalition on shared strategy and communication",
        timeframe: "Week 2-3",
        lever: "Coordination",
        expected_effect: "Unified messaging prevents opponents from dividing the coalition",
        risk: "Internal disagreements may surface during alignment",
    };
    const executeStep = {
        order: 3,
        action: "Execute coordinated lever deployment",
        timeframe: "Week 3-4",
        lever: levers[0] ?? "Primary collective action",
        expected_effect: `Coalition moves together toward "${goal.outcome}"`,
        risk: "Weakest coalition member may defect under pressure",
    };
    const sustainStep = {
        order: 4,
        action: "Sustain pressure and consolidate gains",
        timeframe: "Week 4+",
        lever: levers[1] ?? "Sustained engagement",
        expected_effect: "Lock in progress and prevent rollback",
        risk: "Coalition fatigue and attention drift",
    };
    return [formStep, alignStep, executeStep, sustainStep];
}
|
|
292
|
+
/**
 * Build the step plan for the "Incremental Path" strategy: a
 * test-evaluate-scale loop of small, reversible moves.
 *
 * @param {object} goal - Goal spec (not read; kept for interface parity with the other builders).
 * @param {string[]} levers - Available levers; first is the pilot, second (or first again) the scale-up.
 * @returns {Array<object>} Four ordered steps.
 */
function buildIncrementalSteps(goal, levers) {
    const pilotLever = levers[0] ?? "Pilot action";
    const scaleLever = levers[1] ?? levers[0] ?? "Scaled action";
    // [action, timeframe, lever, expected_effect, risk] per phase.
    const rows = [
        [
            "Take smallest possible step toward the goal to test feasibility",
            "Week 1",
            pilotLever,
            "Generate signal about whether this direction works",
            "Minimal — designed to be reversible",
        ],
        [
            "Evaluate step 1 results against success criteria",
            "Week 2",
            "Assessment",
            "Determine if the approach is viable before scaling",
            "May not be enough data for confident evaluation",
        ],
        [
            "Scale up if step 1 produced positive signal; pivot if not",
            "Week 3-4",
            scaleLever,
            "Either accelerate a proven approach or redirect resources",
            "Competitors may move faster with bolder approaches",
        ],
        [
            "Continue test-and-scale cycle until success criteria met",
            "Ongoing",
            "Iterative execution",
            "Converge toward the goal through validated learning",
            "Window of opportunity may close before iteration completes",
        ],
    ];
    return rows.map(([action, timeframe, lever, expected_effect, risk], idx) => ({
        order: idx + 1,
        action,
        timeframe,
        lever,
        expected_effect,
        risk,
    }));
}
|
|
328
|
+
/**
 * Build the step plan for the "Asymmetric Path" strategy: find the
 * single highest-leverage point (the last lever in the list, treated
 * as the non-obvious one), strike it, then exploit the opening.
 *
 * @param {object} goal - Goal spec (not read; kept for interface parity with the other builders).
 * @param {string[]} levers - Available levers; the last entry is used as the fulcrum.
 * @returns {Array<object>} Three ordered steps.
 */
function buildAsymmetricSteps(goal, levers) {
    const fulcrum = levers.at(-1);
    const identify = {
        order: 1,
        action: `Identify the highest-leverage point in the system — test "${fulcrum}"`,
        timeframe: "Day 1-5",
        lever: "Analysis and targeting",
        expected_effect: "Find the single point where minimal force creates maximum effect",
        risk: "Analysis may identify the wrong fulcrum",
    };
    const strike = {
        order: 2,
        action: `Prepare and execute asymmetric move using ${fulcrum}`,
        timeframe: "Day 5-10",
        lever: fulcrum,
        expected_effect: "Disproportionate impact through a single, well-targeted intervention",
        risk: "If the fulcrum doesn't hold, there's no backup in progress",
    };
    const exploit = {
        order: 3,
        action: "Exploit the opening created by the asymmetric move",
        timeframe: "Day 10-20",
        lever: "Exploitation of advantage",
        expected_effect: "Convert initial disruption into sustained progress toward goal",
        risk: "Opponents may adapt quickly once they understand the approach",
    };
    return [identify, strike, exploit];
}
|
|
357
|
+
// ============================================
|
|
358
|
+
// RED LINE ASSESSMENT
|
|
359
|
+
// ============================================
|
|
360
|
+
/**
 * Flag levers that plausibly conflict with the user's red lines.
 *
 * Matching is a coarse keyword cross-check: a red line and a lever are
 * considered in conflict when both mention the same sensitive topic.
 *
 * @param {string} strategyType - Short strategy name for the message.
 * @param {string[]} redLines - User-stated red lines.
 * @param {string[]} leversUsed - Levers this strategy deploys.
 * @returns {string[]} Human-readable violation descriptions (possibly empty).
 */
function assessRedLineViolations(strategyType, redLines, leversUsed) {
    // [red-line keyword, lever keyword] pairs that signal a conflict.
    // Note the deliberate "sanctions"/"sanction" asymmetry: red lines tend
    // to use the plural while lever names may use the singular.
    const CONFLICT_PAIRS = [
        ["military", "military"],
        ["tax", "tax"],
        ["force", "force"],
        ["sanctions", "sanction"],
    ];
    const violations = [];
    for (const redLine of redLines) {
        const rl = redLine.toLowerCase();
        for (const lever of leversUsed) {
            const lv = lever.toLowerCase();
            const conflicts = CONFLICT_PAIRS.some(([rlKey, lvKey]) => rl.includes(rlKey) && lv.includes(lvKey));
            if (conflicts) {
                violations.push(`Strategy "${strategyType}" uses "${lever}" which may conflict with red line: "${redLine}"`);
            }
        }
    }
    return violations;
}
|
|
377
|
+
// ============================================
|
|
378
|
+
// TIMEFRAME UTILITIES
|
|
379
|
+
// ============================================
|
|
380
|
+
/**
 * Scale the numeric part of a timeframe string ("30 days", "2 weeks")
 * by a multiplier, rounding up. The input is returned unchanged when
 * no "<number> <unit>" pattern is found.
 *
 * Fixes: parseInt now passes an explicit radix, and the unit is only
 * pluralized when the scaled value differs from 1 (previously "1 day"
 * scaled by 1 produced "1 days").
 *
 * @param {string} base - Timeframe like "30 days"; case-insensitive unit.
 * @param {number} multiplier - Scale factor (e.g. 1.5, 2).
 * @returns {string} Scaled timeframe, e.g. "45 days".
 */
function extendTimeframe(base, multiplier) {
    const match = base.match(/(\d+)\s*(day|week|month|year)/i);
    if (!match)
        return base;
    const scaled = Math.ceil(Number.parseInt(match[1], 10) * multiplier);
    const unit = match[2].toLowerCase();
    return `${scaled} ${unit}${scaled === 1 ? "" : "s"}`;
}
|
|
388
|
+
// ============================================
|
|
389
|
+
// SWARM STRATEGY TESTING
|
|
390
|
+
// ============================================
|
|
391
|
+
/**
|
|
392
|
+
* Test a strategy against the swarm simulation.
|
|
393
|
+
* Runs the simulation with the strategy as context and
|
|
394
|
+
* evaluates whether the goal is reached.
|
|
395
|
+
*/
|
|
396
|
+
/**
 * Test a strategy against the swarm simulation.
 *
 * Runs the simulation with the strategy embedded in the scenario text,
 * then scores how close the final round's stakeholder reactions put us
 * to the goal, and collects red-line and emergent-dynamics warnings.
 *
 * Fixes: guards the average-impact computation against an empty final
 * reactions list (previously 0/0 produced NaN proximity); replaces a
 * no-op identity `.map()` and a manual copy loop with spreads; hoists
 * the repeated last-round indexing and per-keyword `toLowerCase()`.
 *
 * @param {object} strategy - Candidate strategy from generateStrategies.
 * @param {string} scenario - Base scenario text.
 * @param {object} goal - Goal spec; `outcome` is projected into the test path.
 * @param {Array<object>} stakeholders - Stakeholders for the simulation.
 * @param {object} swarmConfig - Swarm configuration passed through to the simulator.
 * @returns {Promise<object>} Swarm test result keyed by strategy_id.
 */
async function testStrategyAgainstSwarm(strategy, scenario, goal, stakeholders, swarmConfig) {
    // Build a modified scenario that includes the strategy being attempted.
    const strategyScenario = `${scenario} [Strategy being tested: ${strategy.label} — ${strategy.description}]`;
    // Create a reasoning path from the strategy for swarm consumption.
    const strategyPath = {
        id: strategy.id,
        label: strategy.label,
        description: strategy.description,
        projected_outcome: `Goal: ${goal.outcome}`,
        probability: strategy.success_probability,
        risk: strategy.risk,
        tradeoffs: [...strategy.failure_modes],
        benefits_stakeholders: strategy.likely_allies,
        harms_stakeholders: strategy.likely_opponents,
    };
    // Run the simulation.
    const unified = await (0, mirofish_1.runUnifiedSimulation)(strategyScenario, stakeholders, [strategyPath], swarmConfig);
    const result = unified.result;
    // Final-round reactions drive the goal-proximity score.
    const finalRound = result.rounds.length > 0 ? result.rounds[result.rounds.length - 1] : undefined;
    const finalReactions = finalRound?.reactions ?? [];
    const avgImpact = finalReactions.length > 0
        ? finalReactions.reduce((sum, r) => sum + r.impact, 0) / finalReactions.length
        : 0;
    // Map average impact in [-1, 1] to proximity in [0, 1]; positive impact = moving toward goal.
    const goalProximity = Math.max(0, Math.min(1, (avgImpact + 1) / 2));
    const goalReached = goalProximity > 0.7;
    // Red line crossings come straight from the strategy's static assessment.
    const redLinesCrossed = [...strategy.red_line_violations];
    // Detect unintended consequences from emergent dynamics.
    const WARNING_MARKERS = ["opposition", "polariz", "coalition risk"];
    const unintendedConsequences = [];
    for (const round of result.rounds) {
        for (const dynamic of round.emergent_dynamics ?? []) {
            const lowered = dynamic.toLowerCase();
            if (WARNING_MARKERS.some((marker) => lowered.includes(marker))) {
                unintendedConsequences.push(dynamic);
            }
        }
    }
    return {
        strategy_id: strategy.id,
        goal_reached: goalReached,
        goal_proximity: Number(goalProximity.toFixed(2)),
        steps_to_goal: goalReached ? result.rounds.length : "not_reached",
        stakeholder_reactions: finalReactions,
        unintended_consequences: unintendedConsequences,
        red_lines_crossed: redLinesCrossed,
    };
}
|
|
455
|
+
// ============================================
|
|
456
|
+
// MAIN GOAL ENGINE
|
|
457
|
+
// ============================================
|
|
458
|
+
/**
|
|
459
|
+
* Run goal-directed reasoning.
|
|
460
|
+
*
|
|
461
|
+
* "How do I get here?"
|
|
462
|
+
*
|
|
463
|
+
* 1. Infer or use provided levers
|
|
464
|
+
* 2. Generate candidate strategies
|
|
465
|
+
* 3. Test each strategy against the swarm
|
|
466
|
+
* 4. Rank by success probability respecting red lines
|
|
467
|
+
* 5. Recommend the best path
|
|
468
|
+
*/
|
|
469
|
+
/**
 * Run goal-directed reasoning: "How do I get here?"
 *
 * 1. Infer or use provided levers
 * 2. Generate candidate strategies
 * 3. Test each strategy against the swarm (or synthesize results)
 * 4. Rank by success probability, penalizing red-line violations
 * 5. Recommend the best path and explain the rejections
 *
 * Improvement: swarm test results are indexed once in a Map keyed by
 * strategy_id instead of repeated linear `.find()` scans inside the
 * sort comparator and later lookups.
 *
 * @param {object} request - Goal request: scenario, goal, optional swarm config.
 * @param {Array<object>} stakeholders - Stakeholders with `id` and `disposition`.
 * @returns {Promise<object>} Ranked strategies, swarm tests, and a recommendation.
 */
async function runGoalReasoning(request, stakeholders) {
    const goal = request.goal;
    // 1. Determine available levers (user-specified wins over inference).
    const levers = goal.available_levers && goal.available_levers.length > 0
        ? goal.available_levers
        : inferLevers(request.scenario, stakeholders, goal);
    // 2. Generate candidate strategies.
    const strategies = generateStrategies(request.scenario, goal, stakeholders, levers);
    // 3. Test each strategy against the swarm (if swarm is enabled).
    let swarmTests = [];
    if (request.swarm?.enabled && stakeholders.length > 0) {
        swarmTests = await Promise.all(strategies.map((strategy) => testStrategyAgainstSwarm(strategy, request.scenario, goal, stakeholders, request.swarm)));
    }
    else {
        // Generate synthetic test results without the swarm.
        swarmTests = strategies.map((strategy) => ({
            strategy_id: strategy.id,
            goal_reached: strategy.success_probability > 0.5,
            goal_proximity: strategy.success_probability,
            steps_to_goal: strategy.success_probability > 0.5
                ? Math.ceil(3 / strategy.success_probability)
                : "not_reached",
            stakeholder_reactions: [],
            unintended_consequences: strategy.risk === "high"
                ? ["High-risk strategies may trigger unforeseen opposition"]
                : [],
            red_lines_crossed: strategy.red_line_violations,
        }));
    }
    // Index tests once; strategy ids are unique.
    const testsById = new Map(swarmTests.map((t) => [t.strategy_id, t]));
    // 4. Rank strategies — highest success probability that respects red lines.
    const rankedStrategies = [...strategies].sort((a, b) => {
        const aTest = testsById.get(a.id);
        const bTest = testsById.get(b.id);
        // Red-line violators always rank below clean strategies.
        const aViolations = aTest?.red_lines_crossed.length ?? 0;
        const bViolations = bTest?.red_lines_crossed.length ?? 0;
        if (aViolations > 0 && bViolations === 0)
            return 1;
        if (bViolations > 0 && aViolations === 0)
            return -1;
        // Then sort by goal proximity (fall back to the static probability).
        const aProximity = aTest?.goal_proximity ?? a.success_probability;
        const bProximity = bTest?.goal_proximity ?? b.success_probability;
        return bProximity - aProximity;
    });
    const recommended = rankedStrategies[0];
    const recommendedTest = testsById.get(recommended.id);
    // 5. Build rationale.
    const rationale = buildRecommendationRationale(recommended, recommendedTest, goal, rankedStrategies);
    // 6. Explain why each non-recommended strategy was rejected.
    const rejected = rankedStrategies.slice(1).map((s) => {
        const test = testsById.get(s.id);
        let reason;
        if ((test?.red_lines_crossed.length ?? 0) > 0) {
            reason = `Violates red line: ${test.red_lines_crossed[0]}`;
        }
        else if ((test?.goal_proximity ?? 0) < 0.3) {
            reason = `Low goal proximity (${((test?.goal_proximity ?? 0) * 100).toFixed(0)}%)`;
        }
        else {
            reason = `Lower overall success probability than recommended path`;
        }
        return { label: s.label, reason };
    });
    return {
        goal,
        strategies: rankedStrategies,
        swarm_tests: swarmTests,
        recommended_strategy_id: recommended.id,
        recommendation_rationale: rationale,
        immediate_next_step: recommended.steps[0]?.action ?? "Begin assessment",
        rejected_strategies: rejected,
    };
}
|
|
544
|
+
/**
 * Compose a human-readable explanation for why a strategy is the
 * recommended one: headline, swarm-test evidence (when available),
 * red-line compliance, and how many alternatives lost out.
 *
 * @param {object} strategy - The recommended strategy.
 * @param {object|undefined} test - Its swarm test result, if any.
 * @param {object} goal - Goal spec; `outcome` appears in the headline.
 * @param {Array<object>} allStrategies - All ranked strategies (recommended included).
 * @returns {string} Space-joined rationale sentences.
 */
function buildRecommendationRationale(strategy, test, goal, allStrategies) {
    const sentences = [
        `"${strategy.label}" is recommended because it offers the best risk-adjusted path toward "${goal.outcome}".`,
    ];
    if (test) {
        const pct = (test.goal_proximity * 100).toFixed(0);
        sentences.push(test.goal_reached
            ? `Swarm simulation shows this strategy reaching the goal with ${pct}% proximity.`
            : `Swarm simulation shows ${pct}% proximity to the goal — partial success is likely.`);
    }
    if (strategy.red_line_violations.length === 0) {
        sentences.push("This strategy respects all stated red lines.");
    }
    const alternativeCount = allStrategies.length - 1;
    const countPhrase = alternativeCount === 1 ? "strategy was" : "strategies were";
    sentences.push(`${alternativeCount} alternative ${countPhrase} evaluated and ranked lower.`);
    return sentences.join(" ");
}
|