@neuroverseos/nv-sim 0.1.2 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +562 -68
  2. package/dist/adapters/mirofish.js +461 -0
  3. package/dist/adapters/scienceclaw.js +750 -0
  4. package/dist/assets/index-B64NuIXu.css +1 -0
  5. package/dist/assets/index-DbzSnYxr.js +532 -0
  6. package/dist/assets/mirotir-logo-DUexumBH.svg +185 -0
  7. package/dist/assets/reportEngine-DKWTrP6-.js +1 -0
  8. package/dist/components/ConstraintsPanel.js +11 -0
  9. package/dist/components/StakeholderBuilder.js +32 -0
  10. package/dist/components/ui/badge.js +24 -0
  11. package/dist/components/ui/button.js +70 -0
  12. package/dist/components/ui/card.js +57 -0
  13. package/dist/components/ui/input.js +44 -0
  14. package/dist/components/ui/label.js +45 -0
  15. package/dist/components/ui/select.js +70 -0
  16. package/dist/engine/aiProvider.js +681 -0
  17. package/dist/engine/auditTrace.js +352 -0
  18. package/dist/engine/behavioralAnalysis.js +605 -0
  19. package/dist/engine/cli.js +1408 -299
  20. package/dist/engine/dynamicsGovernance.js +588 -0
  21. package/dist/engine/fullGovernedLoop.js +367 -0
  22. package/dist/engine/governance.js +8 -3
  23. package/dist/engine/governedSimulation.js +114 -17
  24. package/dist/engine/index.js +56 -1
  25. package/dist/engine/liveAdapter.js +342 -0
  26. package/dist/engine/liveVisualizer.js +4284 -0
  27. package/dist/engine/metrics/science.metrics.js +335 -0
  28. package/dist/engine/narrativeInjection.js +360 -0
  29. package/dist/engine/policyEnforcement.js +1611 -0
  30. package/dist/engine/policyEngine.js +799 -0
  31. package/dist/engine/primeRadiant.js +540 -0
  32. package/dist/engine/reasoningEngine.js +57 -3
  33. package/dist/engine/reportEngine.js +97 -0
  34. package/dist/engine/scenarioCapsule.js +56 -0
  35. package/dist/engine/scenarioComparison.js +463 -0
  36. package/dist/engine/scenarioLibrary.js +248 -0
  37. package/dist/engine/swarmSimulation.js +54 -1
  38. package/dist/engine/worldComparison.js +358 -0
  39. package/dist/engine/worldStorage.js +232 -0
  40. package/dist/favicon.ico +0 -0
  41. package/dist/index.html +23 -0
  42. package/dist/lib/reasoningEngine.js +290 -0
  43. package/dist/lib/simulationAdapter.js +686 -0
  44. package/dist/lib/swarmParser.js +291 -0
  45. package/dist/lib/types.js +2 -0
  46. package/dist/lib/utils.js +8 -0
  47. package/dist/placeholder.svg +1 -0
  48. package/dist/robots.txt +14 -0
  49. package/dist/runtime/govern.js +473 -0
  50. package/dist/runtime/index.js +75 -0
  51. package/dist/runtime/types.js +11 -0
  52. package/package.json +17 -12
  53. package/variants/.gitkeep +0 -0
@@ -0,0 +1,681 @@
"use strict";
/**
 * AI Provider — Bring Your Own Model (BYOM)
 *
 * AI is NOT the engine. AI is an actor inside the governed system.
 * Every AI call goes through /world evaluate — same as any other agent.
 *
 * Design principles:
 * - AI is optional: system works without it
 * - AI is governed: subject to world rules like any actor
 * - AI is pluggable: users bring their own model + key
 * - AI is auditable: every call appears in the trace
 *
 * Two governed AI roles:
 *   ai_translator — converts unstructured input → normalized events
 *   ai_analyst — generates governed reports from traces
 */
// TypeScript-emitted CommonJS interop helpers. They back the lazy
// `require("@anthropic-ai/sdk")` import in AnthropicProvider.getClient:
// named exports are re-bound onto a namespace object and the original
// module is attached as `default`.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function (target, source, key, alias) {
    if (alias === undefined) alias = key;
    var descriptor = Object.getOwnPropertyDescriptor(source, key);
    if (!descriptor || ("get" in descriptor ? !source.__esModule : descriptor.writable || descriptor.configurable)) {
        // Re-export as a live getter so later mutations of `source` are visible.
        descriptor = { enumerable: true, get: function () { return source[key]; } };
    }
    Object.defineProperty(target, alias, descriptor);
}) : (function (target, source, key, alias) {
    if (alias === undefined) alias = key;
    target[alias] = source[key];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function (namespace, mod) {
    Object.defineProperty(namespace, "default", { enumerable: true, value: mod });
}) : function (namespace, mod) {
    namespace["default"] = mod;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function (o) {
        // Resolve the key-enumeration strategy once, then reuse it.
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        // Genuine ES modules pass through untouched.
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAICompatibleProvider = exports.AnthropicProvider = exports.DeterministicProvider = exports.AI_ROLES = void 0;
// The function declarations below in this module are hoisted, so
// re-exporting them here (before their definitions) is safe.
exports.generateDeterministicReport = generateDeterministicReport;
exports.generateAIReaction = generateAIReaction;
exports.getDefaultProviderName = getDefaultProviderName;
exports.registerAIProvider = registerAIProvider;
exports.getAIProvider = getAIProvider;
exports.listAIProviders = listAIProviders;
exports.evaluateAIAction = evaluateAIAction;
// Governed role: turns free text into normalized events.
const AI_TRANSLATOR_ROLE = {
    id: "ai_translator",
    type: "ai",
    permissions: ["translate_input"],
    constraints: [
        "must_output_valid_schema",
        "no_invention_of_events",
        "confidence_must_be_provided",
    ],
};
// Governed role: produces reports and analysis from traces.
const AI_ANALYST_ROLE = {
    id: "ai_analyst",
    type: "ai",
    permissions: [
        "generate_report",
        "summarize_trace",
        "suggest_experiments",
        "detect_archetype",
        "synthesize_strategy",
        "compare_experiments",
    ],
    constraints: [
        "must_reference_trace",
        "must_include_blocked_actions",
        "must_include_metrics",
        "no_unverifiable_claims",
        "no_speculation_without_flag",
        "must_include_confidence",
    ],
};
/** Default AI roles for world definitions */
exports.AI_ROLES = [AI_TRANSLATOR_ROLE, AI_ANALYST_ROLE];
// ============================================
// BUILT-IN PROVIDER: DETERMINISTIC (NO AI)
// ============================================
/**
 * Default provider when no AI is configured.
 * Generates structured reports purely from trace data.
 * No LLM. No API key. No network.
 */
class DeterministicProvider {
    name = "deterministic";
    /**
     * Keyword-match unstructured text into normalized events.
     * Without a model only a few coarse market phrases are recognized;
     * anything else is flagged for real AI translation.
     */
    async translate(input) {
        const lower = input.toLowerCase();
        const mentions = (...words) => words.some((w) => lower.includes(w));
        const events = [];
        if (mentions("sell", "panic", "crash")) {
            events.push({
                type: "market_shock",
                action: "sell_pressure",
                impact: -0.7,
                confidence: 0.6,
                severity: 0.8,
            });
        }
        if (mentions("buy", "rally", "recover")) {
            events.push({
                type: "market_recovery",
                action: "buy_pressure",
                impact: 0.5,
                confidence: 0.5,
                severity: 0.3,
            });
        }
        // Nothing matched: pass the raw text through for an AI to handle later.
        if (events.length === 0) {
            events.push({
                type: "unstructured_input",
                action: "needs_ai_translation",
                impact: 0,
                confidence: 0.3,
                metadata: { raw: input },
            });
        }
        return events;
    }
    /** Reports come straight from trace data — no model involved. */
    async summarize(trace) {
        return generateDeterministicReport(trace);
    }
}
// Exported so callers can run fully offline with zero configuration.
exports.DeterministicProvider = DeterministicProvider;
/**
 * Generate a report purely from trace data — no AI needed.
 * This is the fallback and also proves governance doesn't need AI.
 *
 * @param trace - simulation trace carrying rounds, interventions,
 *                governanceStats and aggregate metrics
 * @returns a structured report in the same shape the AI analyst emits
 */
function generateDeterministicReport(trace) {
    const { metrics, interventions, governanceStats, rounds } = trace;
    const blockCount = governanceStats.blocks;
    const totalInterventions = interventions.length;
    const stabilityPct = (metrics.stabilityScore * 100).toFixed(0);
    const collapsePct = (metrics.collapseProbability * 100).toFixed(0);
    // Key events: the first five rounds that triggered any intervention.
    const keyEvents = [];
    for (const r of rounds) {
        if (keyEvents.length === 5) break;
        if (r.interventions.length === 0) continue;
        const agents = [...new Set(r.reactions.map((rx) => rx.agent))];
        const hasBlock = r.interventions.some((i) => i.includes("[BLOCK]"));
        keyEvents.push({
            round: r.round,
            // Strip the leading "[TAG] " marker, if present, for display.
            description: r.interventions[0].replace(/^\[.*?\]\s*/, ""),
            agents,
            severity: hasBlock ? "high" : "moderate",
        });
    }
    // Governance actions: parse up to ten "[TYPE] agent: description" lines.
    const actionPattern = /^\[(BLOCK|PAUSE|GATE-\d+)\]\s*(.*?):\s*(.*)/;
    const governanceActions = interventions.slice(0, 10).map((line) => {
        const parts = line.match(actionPattern);
        if (!parts) {
            // Free-form intervention text: attribute it to the system.
            return { type: "REBALANCE", agent: "system", description: line };
        }
        let type = "REBALANCE";
        if (parts[1] === "BLOCK") {
            type = "BLOCK";
        }
        else if (parts[1] === "PAUSE") {
            type = "PAUSE";
        }
        else if (parts[1].startsWith("GATE")) {
            type = "CIRCUIT_BREAKER";
        }
        return { type, agent: parts[2].trim(), description: parts[3].trim() };
    });
    // Executive summary built purely from the aggregate numbers.
    const executiveSummary = blockCount > 0
        ? `Governance engine blocked ${blockCount} high-risk actions and applied ${totalInterventions} interventions. System stability: ${stabilityPct}%. Collapse probability held at ${collapsePct}%.`
        : `Governance constraints applied ${totalInterventions} interventions, maintaining ${stabilityPct}% system stability with ${collapsePct}% collapse risk.`;
    // Observations derived from trace thresholds.
    const observations = [];
    if (blockCount > 2) {
        observations.push("Multiple block events indicate aggressive agent behavior was successfully contained by invariant enforcement.");
    }
    if (metrics.stabilityScore > 0.6) {
        observations.push("Governance rules maintained system stability above critical thresholds throughout the simulation.");
    }
    if (metrics.maxVolatility > 0.5) {
        observations.push(`Peak volatility of ${(metrics.maxVolatility * 100).toFixed(0)}% was dampened by circuit breaker gates.`);
    }
    if (governanceStats.rulesFired > 3) {
        observations.push(`${governanceStats.rulesFired} world rules fired during deterministic simulation, shaping agent behavior across rounds.`);
    }
    if (observations.length === 0) {
        observations.push("Governance constraints maintained equilibrium with minimal intervention required.");
    }
    return {
        executiveSummary,
        keyEvents,
        governanceActions,
        metrics: {
            volatilityReduction: `${(metrics.maxVolatility * 100).toFixed(0)}% peak`,
            collapseRisk: `${collapsePct}%`,
            stabilityScore: `${stabilityPct}%`,
            interventionCount: totalInterventions,
            blockedActions: blockCount,
        },
        counterfactual: totalInterventions > 3
            ? `Without governance constraints, ${blockCount} blocked actions would have proceeded unchecked, likely increasing volatility beyond recovery thresholds.`
            : undefined,
        observations,
        generatedBy: "deterministic",
        governedBy: "NeuroverseOS",
    };
}
// ============================================
// BUILT-IN PROVIDER: ANTHROPIC (CLAUDE)
// ============================================
/**
 * Anthropic Claude provider — real AI reasoning for NeuroverseOS.
 *
 * Requires ANTHROPIC_API_KEY environment variable.
 * Uses Claude to translate unstructured input into normalized events
 * and generate governed reports from simulation traces.
 */
class AnthropicProvider {
    name = "anthropic";
    client = null; // lazily created SDK client — see getClient()
    model;
    _apiKey;
    /**
     * @param options - optional { model, apiKey }; apiKey falls back to the
     *                  ANTHROPIC_API_KEY environment variable
     * @throws Error when no API key can be resolved
     */
    constructor(options) {
        this.model = options?.model ?? "claude-sonnet-4-20250514";
        const key = options?.apiKey ?? process.env.ANTHROPIC_API_KEY;
        if (!key) {
            throw new Error("AnthropicProvider requires ANTHROPIC_API_KEY environment variable or apiKey option");
        }
        this._apiKey = key;
    }
    /** Lazily load the SDK so it remains an optional dependency. */
    async getClient() {
        if (this.client) {
            return this.client;
        }
        try {
            const sdk = await Promise.resolve().then(() => __importStar(require("@anthropic-ai/sdk")));
            const Anthropic = sdk.default ?? sdk.Anthropic;
            this.client = new Anthropic({ apiKey: this._apiKey });
        }
        catch {
            throw new Error("Failed to load @anthropic-ai/sdk. Install it: npm install @anthropic-ai/sdk");
        }
        return this.client;
    }
    /** Convert unstructured text into normalized events via Claude. */
    async translate(input) {
        const client = await this.getClient();
        const reply = await client.messages.create({
            model: this.model,
            max_tokens: 1024,
            system: TRANSLATE_SYSTEM_PROMPT,
            messages: [{ role: "user", content: input }],
        });
        const first = reply.content[0];
        const text = first?.type === "text" ? first.text : "[]";
        return safeParseEventArray(text);
    }
    /**
     * Produce a governed report from a trace. Falls back to the
     * deterministic report when the model's output is not valid JSON.
     */
    async summarize(trace) {
        const client = await this.getClient();
        const traceJson = buildTraceSummaryInput(trace);
        const reply = await client.messages.create({
            model: this.model,
            max_tokens: 2048,
            system: SUMMARIZE_SYSTEM_PROMPT,
            messages: [{ role: "user", content: `Analyze this simulation trace:\n${traceJson}` }],
        });
        const first = reply.content[0];
        const text = first?.type === "text" ? first.text : "{}";
        try {
            return JSON.parse(text);
        }
        catch {
            return generateDeterministicReport(trace);
        }
    }
}
// Claude-backed provider (requires ANTHROPIC_API_KEY).
exports.AnthropicProvider = AnthropicProvider;
// ============================================
// BUILT-IN PROVIDER: OPENAI-COMPATIBLE
// ============================================
/**
 * OpenAI-compatible provider — works with ANY API that speaks the
 * OpenAI chat completions format:
 *
 * - OpenAI (GPT-4, GPT-4o, o1, etc.)
 * - Local LLMs: Ollama, LM Studio, vLLM, llama.cpp, LocalAI
 * - Cloud APIs: Groq, Together, Fireworks, Mistral, Deepseek, Perplexity
 * - Any custom endpoint that implements POST /v1/chat/completions
 *
 * Configuration via env vars:
 *   OPENAI_API_KEY  — API key (set to "none" or "local" for local models)
 *   OPENAI_BASE_URL — Custom base URL (default: https://api.openai.com/v1)
 *   OPENAI_MODEL    — Model name (default: gpt-4o)
 *
 * Or via constructor options for full control.
 */
class OpenAICompatibleProvider {
    name;
    model;
    baseUrl;
    apiKey;
    headers;
    /** @param options - optional { name, model, apiKey, baseUrl, headers } */
    constructor(options) {
        this.name = options?.name ?? "openai";
        this.model = options?.model ?? process.env.OPENAI_MODEL ?? "gpt-4o";
        this.apiKey = options?.apiKey ?? process.env.OPENAI_API_KEY ?? "";
        const rawBase = options?.baseUrl ?? process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1";
        this.baseUrl = rawBase.replace(/\/+$/, ""); // strip trailing slashes
        this.headers = options?.headers ?? {};
    }
    /**
     * POST a single system+user exchange to /chat/completions and return
     * the assistant's text (empty string when the response has none).
     * @throws Error on any non-2xx HTTP response
     */
    async chatCompletion(systemPrompt, userMessage, maxTokens) {
        const requestHeaders = {
            "Content-Type": "application/json",
            ...this.headers,
        };
        // Only add Authorization if we have a real key (not "none"/"local"/"")
        if (this.apiKey && this.apiKey !== "none" && this.apiKey !== "local") {
            requestHeaders["Authorization"] = `Bearer ${this.apiKey}`;
        }
        const payload = {
            model: this.model,
            max_tokens: maxTokens,
            temperature: 0.7,
            messages: [
                { role: "system", content: systemPrompt },
                { role: "user", content: userMessage },
            ],
        };
        const response = await fetch(`${this.baseUrl}/chat/completions`, {
            method: "POST",
            headers: requestHeaders,
            body: JSON.stringify(payload),
        });
        if (!response.ok) {
            const errText = await response.text().catch(() => "");
            throw new Error(`${this.name} API error ${response.status}: ${errText.slice(0, 200)}`);
        }
        const json = await response.json();
        return json.choices?.[0]?.message?.content ?? "";
    }
    /** Convert unstructured text into normalized events. */
    async translate(input) {
        const text = await this.chatCompletion(TRANSLATE_SYSTEM_PROMPT, input, 1024);
        return safeParseEventArray(text);
    }
    /**
     * Produce a governed report from a trace; falls back to the
     * deterministic report when the model output is not valid JSON.
     */
    async summarize(trace) {
        const traceJson = buildTraceSummaryInput(trace);
        const text = await this.chatCompletion(SUMMARIZE_SYSTEM_PROMPT, `Analyze this simulation trace:\n${traceJson}`, 2048);
        try {
            return JSON.parse(text);
        }
        catch {
            return generateDeterministicReport(trace);
        }
    }
}
// Covers OpenAI itself plus every endpoint speaking its wire format.
exports.OpenAICompatibleProvider = OpenAICompatibleProvider;
// ============================================
// SHARED PROMPTS & HELPERS
// ============================================
// System prompt for the ai_translator role: forces a raw JSON event array.
const TRANSLATE_SYSTEM_PROMPT = `You are an event normalizer for NeuroverseOS, a governance simulation platform.
Given unstructured input, extract structured events. Return ONLY valid JSON — an array of objects with these fields:
- type (string): event category (market_shock, policy_change, agent_action, etc.)
- agent (string, optional): who caused it
- action (string): what happened
- impact (number, -1 to 1): negative to positive impact
- confidence (number, 0 to 1): how confident you are
- severity (number, 0 to 1): how severe
- sector (string, optional): affected domain
No markdown, no explanation — just the JSON array.`;
// System prompt for the ai_analyst role: pins the report JSON schema.
const SUMMARIZE_SYSTEM_PROMPT = `You are a governance analyst for NeuroverseOS. Analyze simulation trace data and produce a structured report.
Return ONLY valid JSON matching this schema:
{
"executiveSummary": "string — 2-3 sentence overview",
"keyEvents": [{"round": number, "description": "string", "agents": ["string"], "severity": "low|moderate|high|critical"}],
"governanceActions": [{"type": "BLOCK|PAUSE|CIRCUIT_BREAKER|REBALANCE", "agent": "string", "description": "string"}],
"metrics": {"volatilityReduction": "string", "collapseRisk": "string", "stabilityScore": "string", "interventionCount": number, "blockedActions": number},
"counterfactual": "string — what would have happened without governance",
"observations": ["string — insight about agent behavior under governance"],
"generatedBy": "ai",
"governedBy": "NeuroverseOS"
}
Focus on WHAT AGENTS DID DIFFERENTLY under governance — behavioral shifts matter more than raw block counts.`;
// System prompt for per-stakeholder reaction calls (see generateAIReaction).
const AGENT_REACTION_SYSTEM_PROMPT = `You are a stakeholder in a NeuroverseOS governance simulation.
You will be given your identity, the scenario, and the current state.
React in character. Return ONLY valid JSON:
{
"reaction": "1-2 sentence description of your reaction and reasoning",
"impact": <number from -1 (strongly negative) to 1 (strongly positive)>,
"confidence": <number from 0 to 1 — how sure you are>,
"trigger": "brief label for what drove this reaction"
}`;
/**
 * Parse model output that should be a JSON array of events.
 * A lone object is wrapped into a single-element array; unparseable
 * text becomes a low-confidence passthrough event carrying the raw text.
 */
function safeParseEventArray(text) {
    try {
        const parsed = JSON.parse(text);
        return Array.isArray(parsed) ? parsed : [parsed];
    }
    catch {
        const passthrough = {
            type: "ai_translation",
            action: "parsed_input",
            impact: 0,
            confidence: 0.5,
            metadata: { raw: text, source: "ai" },
        };
        return [passthrough];
    }
}
/**
 * Compress a trace into a compact JSON payload for prompt input.
 * Caps interventions at 20 and sample rounds at 3 to bound prompt size.
 */
function buildTraceSummaryInput(trace) {
    const condensed = {
        scenario: trace.scenario,
        roundCount: trace.rounds.length,
        totalReactions: trace.rounds.reduce((sum, round) => sum + round.reactions.length, 0),
        metrics: trace.metrics,
        governanceStats: trace.governanceStats,
        interventions: trace.interventions.slice(0, 20),
        sampleRounds: trace.rounds.slice(0, 3),
    };
    return JSON.stringify(condensed);
}
/**
 * Generate an AI-powered stakeholder reaction.
 *
 * Works with ANY provider — Anthropic, OpenAI, local LLM, etc.
 * Each agent gets an LLM call to decide how they'd actually react.
 */
async function generateAIReaction(options) {
    const provider = options.provider ?? getAIProvider(getDefaultProviderName());
    // No AI configured: return a neutral, clearly labelled placeholder.
    if (provider.name === "deterministic") {
        return {
            reaction: `${options.stakeholderId} evaluates the situation (no AI provider configured)`,
            impact: 0,
            confidence: 0.3,
            trigger: "No AI provider — deterministic fallback",
        };
    }
    const prompt = `You are ${options.stakeholderId}${options.stakeholderDescription ? ` (${options.stakeholderDescription})` : ""}.
Disposition: ${options.stakeholderDisposition ?? "unknown"}
Priorities: ${options.stakeholderPriorities?.join(", ") ?? "not specified"}

SCENARIO: ${options.scenario}
CURRENT PATH: ${options.pathDescription}
RISK LEVEL: ${options.pathRisk}
ROUND: ${options.round + 1}
${options.previousReactions?.length ? `\nPREVIOUS REACTIONS THIS ROUND:\n${options.previousReactions.join("\n")}` : ""}

How do you react?`;
    try {
        if (provider instanceof AnthropicProvider) {
            // Anthropic SDK path.
            const client = await provider.getClient();
            const reply = await client.messages.create({
                model: provider.model,
                max_tokens: 256,
                system: AGENT_REACTION_SYSTEM_PROMPT,
                messages: [{ role: "user", content: prompt }],
            });
            const first = reply.content[0];
            return parseReactionResponse(first?.type === "text" ? first.text : "{}", options.stakeholderId);
        }
        if (provider instanceof OpenAICompatibleProvider) {
            // OpenAI-compatible chat-completions path.
            const text = await provider.chatCompletion(AGENT_REACTION_SYSTEM_PROMPT, prompt, 256);
            return parseReactionResponse(text, options.stakeholderId);
        }
        // Unknown provider type: lean on its translate() as a proxy.
        const translated = await provider.translate(`Stakeholder ${options.stakeholderId} reacts to: ${options.pathDescription} (risk: ${options.pathRisk})`);
        const event = translated[0];
        return {
            reaction: `${options.stakeholderId}: ${event?.action ?? "no reaction"}`,
            impact: event?.impact ?? 0,
            confidence: event?.confidence ?? 0.5,
            trigger: event?.type ?? "provider-generated",
        };
    }
    catch {
        // Any provider/network failure degrades to a neutral fallback.
        return {
            reaction: `${options.stakeholderId} considers the situation (AI call failed, using fallback)`,
            impact: 0,
            confidence: 0.3,
            trigger: "AI fallback — API error",
        };
    }
}
/**
 * Parse an agent-reaction JSON blob, clamping impact to [-1, 1] and
 * confidence to [0, 1]. Unparseable text is surfaced (truncated to 200
 * chars) as the reaction itself so nothing is silently dropped.
 *
 * NOTE(review): a numeric confidence of exactly 0 falls through the
 * `|| 0.5` default and becomes 0.5 — confirm that is intended.
 */
function parseReactionResponse(text, stakeholderId) {
    const clamp = (value, lo, hi) => Math.max(lo, Math.min(hi, value));
    try {
        const parsed = JSON.parse(text);
        return {
            reaction: String(parsed.reaction ?? `${stakeholderId} reacts`),
            impact: clamp(Number(parsed.impact) || 0, -1, 1),
            confidence: clamp(Number(parsed.confidence) || 0.5, 0, 1),
            trigger: String(parsed.trigger ?? "AI-generated reaction"),
        };
    }
    catch {
        return {
            reaction: text?.slice(0, 200) || `${stakeholderId} reacts`,
            impact: 0,
            confidence: 0.3,
            trigger: "AI response (unparsed)",
        };
    }
}
// ============================================
// PROVIDER REGISTRY
// ============================================
/** Registry of available AI providers (name → lazy factory). */
const providerRegistry = new Map();
/**
 * Built-in providers. Factories are lazy: nothing is constructed (and no
 * API key is demanded) until getAIProvider() actually asks for one, and
 * env vars are read at construction time, not at registration time.
 */
const BUILTIN_PROVIDER_FACTORIES = [
    // No-AI fallback — always available.
    ["deterministic", () => new DeterministicProvider()],
    // Anthropic (Claude) — activates when ANTHROPIC_API_KEY is set.
    ["anthropic", () => new AnthropicProvider()],
    // OpenAI-compatible — covers OpenAI, Groq, Together, Fireworks, Mistral, Deepseek, etc.
    ["openai", () => new OpenAICompatibleProvider()],
    // Local LLMs — Ollama, LM Studio, vLLM, llama.cpp, LocalAI — via the
    // OpenAI-compatible API on localhost.
    // Env vars: LOCAL_LLM_URL (e.g. http://localhost:11434/v1), LOCAL_LLM_MODEL.
    ["local", () => new OpenAICompatibleProvider({
            name: "local",
            baseUrl: process.env.LOCAL_LLM_URL ?? "http://localhost:11434/v1",
            model: process.env.LOCAL_LLM_MODEL ?? "llama3",
            apiKey: "local",
        })],
    // Convenience aliases for popular providers.
    ["ollama", () => new OpenAICompatibleProvider({
            name: "ollama",
            baseUrl: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434/v1",
            model: process.env.OLLAMA_MODEL ?? "llama3",
            apiKey: "local",
        })],
    ["groq", () => new OpenAICompatibleProvider({
            name: "groq",
            baseUrl: "https://api.groq.com/openai/v1",
            model: process.env.GROQ_MODEL ?? "llama-3.3-70b-versatile",
            apiKey: process.env.GROQ_API_KEY ?? "",
        })],
    ["together", () => new OpenAICompatibleProvider({
            name: "together",
            baseUrl: "https://api.together.xyz/v1",
            model: process.env.TOGETHER_MODEL ?? "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
            apiKey: process.env.TOGETHER_API_KEY ?? "",
        })],
    ["mistral", () => new OpenAICompatibleProvider({
            name: "mistral",
            baseUrl: "https://api.mistral.ai/v1",
            model: process.env.MISTRAL_MODEL ?? "mistral-large-latest",
            apiKey: process.env.MISTRAL_API_KEY ?? "",
        })],
    ["deepseek", () => new OpenAICompatibleProvider({
            name: "deepseek",
            baseUrl: "https://api.deepseek.com/v1",
            model: process.env.DEEPSEEK_MODEL ?? "deepseek-chat",
            apiKey: process.env.DEEPSEEK_API_KEY ?? "",
        })],
    ["fireworks", () => new OpenAICompatibleProvider({
            name: "fireworks",
            baseUrl: "https://api.fireworks.ai/inference/v1",
            model: process.env.FIREWORKS_MODEL ?? "accounts/fireworks/models/llama-v3p1-70b-instruct",
            apiKey: process.env.FIREWORKS_API_KEY ?? "",
        })],
];
for (const [providerName, factory] of BUILTIN_PROVIDER_FACTORIES) {
    providerRegistry.set(providerName, factory);
}
/**
 * Auto-detect the best available provider based on environment variables.
 *
 * Priority:
 *   1. ANTHROPIC_API_KEY → anthropic (Claude)
 *   2. OPENAI_API_KEY → openai (GPT-4, etc.)
 *   3. GROQ_API_KEY → groq
 *   4. TOGETHER_API_KEY → together
 *   5. MISTRAL_API_KEY → mistral
 *   6. DEEPSEEK_API_KEY → deepseek
 *   7. FIREWORKS_API_KEY → fireworks
 *   8. LOCAL_LLM_URL → local (Ollama, LM Studio, vLLM, etc.)
 *   9. OLLAMA_BASE_URL → ollama
 *   10. (none) → deterministic (no AI)
 *
 * @returns the provider name to pass to getAIProvider()
 */
function getDefaultProviderName() {
    // Checked in priority order; the first env var that is set wins.
    const detectionOrder = [
        ["ANTHROPIC_API_KEY", "anthropic"],
        ["OPENAI_API_KEY", "openai"],
        ["GROQ_API_KEY", "groq"],
        ["TOGETHER_API_KEY", "together"],
        ["MISTRAL_API_KEY", "mistral"],
        ["DEEPSEEK_API_KEY", "deepseek"],
        ["FIREWORKS_API_KEY", "fireworks"],
        ["LOCAL_LLM_URL", "local"],
        ["OLLAMA_BASE_URL", "ollama"],
    ];
    for (const [envVar, providerName] of detectionOrder) {
        if (process.env[envVar]) {
            return providerName;
        }
    }
    return "deterministic";
}
/** Register a custom AI provider under the given name. */
function registerAIProvider(name, factory) {
    providerRegistry.set(name, factory);
}
/**
 * Get the active AI provider (defaults to "deterministic" when no name
 * is given). Unknown names log a warning and fall back to the
 * deterministic provider rather than throwing.
 */
function getAIProvider(name) {
    const providerName = name ?? "deterministic";
    const factory = providerRegistry.get(providerName);
    if (factory) {
        return factory();
    }
    console.warn(`AI provider "${providerName}" not found, falling back to deterministic`);
    return new DeterministicProvider();
}
/** List available provider names */
function listAIProviders() {
    return Array.from(providerRegistry.keys());
}
/**
 * Evaluate an AI action through governance.
 *
 * This is the critical function: ALL AI calls go through here.
 * The AI actor is subject to the same rules as any other agent.
 *
 * NOTE(review): a permission violation is recorded in the trace but does
 * NOT stop execution — execute() always runs. Confirm that is intended.
 *
 * @param actor - governed AI actor ({ id, permissions, constraints })
 * @param action - permission name being exercised
 * @param execute - async thunk performing the actual AI call
 * @returns { result, actor, action, governed, trace } where trace lists
 *          the constraints checked and any violations found
 */
async function evaluateAIAction(actor, action, execute) {
    const violations = [];
    // Pre-execution: verify the actor holds the permission it is using.
    if (!actor.permissions.includes(action)) {
        violations.push(`Actor ${actor.id} lacks permission for action: ${action}`);
    }
    // Execute the AI action (runs even when the permission check failed).
    const result = await execute();
    // Post-execution: validate report-shaped output against constraints.
    if (action === "generate_report" && result && typeof result === "object") {
        const report = result;
        for (const constraint of actor.constraints) {
            switch (constraint) {
                case "must_include_blocked_actions": {
                    const noActionDetails = !report.governanceActions || report.governanceActions.length === 0;
                    const blocksOccurred = report.metrics?.blockedActions && report.metrics.blockedActions > 0;
                    if (noActionDetails && blocksOccurred) {
                        violations.push("Report missing governance action details despite blocks occurring");
                    }
                    break;
                }
                case "must_include_metrics":
                    if (!report.metrics) {
                        violations.push("Report missing required metrics section");
                    }
                    break;
                default:
                    // Remaining constraints are not machine-checked here.
                    break;
            }
        }
    }
    return {
        result,
        actor: actor.id,
        action,
        governed: true,
        trace: {
            constraints: actor.constraints,
            passed: violations.length === 0,
            violations,
        },
    };
}