@neuroverseos/nv-sim 0.1.4 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. package/README.md +346 -68
  2. package/dist/adapters/mirofish.js +461 -0
  3. package/dist/adapters/scienceclaw.js +750 -0
  4. package/dist/assets/index-B64NuIXu.css +1 -0
  5. package/dist/assets/index-DbzSnYxr.js +532 -0
  6. package/dist/assets/{reportEngine-BfteK4MN.js → reportEngine-DKWTrP6-.js} +1 -1
  7. package/dist/components/ConstraintsPanel.js +11 -0
  8. package/dist/components/StakeholderBuilder.js +32 -0
  9. package/dist/components/ui/badge.js +24 -0
  10. package/dist/components/ui/button.js +70 -0
  11. package/dist/components/ui/card.js +57 -0
  12. package/dist/components/ui/input.js +44 -0
  13. package/dist/components/ui/label.js +45 -0
  14. package/dist/components/ui/select.js +70 -0
  15. package/dist/engine/aiProvider.js +427 -2
  16. package/dist/engine/auditTrace.js +352 -0
  17. package/dist/engine/behavioralAnalysis.js +605 -0
  18. package/dist/engine/cli.js +1087 -13
  19. package/dist/engine/dynamicsGovernance.js +588 -0
  20. package/dist/engine/fullGovernedLoop.js +367 -0
  21. package/dist/engine/governedSimulation.js +77 -6
  22. package/dist/engine/index.js +41 -1
  23. package/dist/engine/liveVisualizer.js +2787 -360
  24. package/dist/engine/metrics/science.metrics.js +335 -0
  25. package/dist/engine/narrativeInjection.js +55 -0
  26. package/dist/engine/policyEnforcement.js +1611 -0
  27. package/dist/engine/policyEngine.js +799 -0
  28. package/dist/engine/primeRadiant.js +540 -0
  29. package/dist/engine/scenarioCapsule.js +56 -0
  30. package/dist/engine/scenarioComparison.js +463 -0
  31. package/dist/engine/scenarioLibrary.js +17 -0
  32. package/dist/engine/swarmSimulation.js +54 -1
  33. package/dist/engine/worldComparison.js +164 -0
  34. package/dist/engine/worldStorage.js +232 -0
  35. package/dist/index.html +2 -2
  36. package/dist/lib/reasoningEngine.js +290 -0
  37. package/dist/lib/simulationAdapter.js +686 -0
  38. package/dist/lib/swarmParser.js +291 -0
  39. package/dist/lib/types.js +2 -0
  40. package/dist/lib/utils.js +8 -0
  41. package/dist/runtime/govern.js +473 -0
  42. package/dist/runtime/index.js +75 -0
  43. package/dist/runtime/types.js +11 -0
  44. package/package.json +5 -2
  45. package/dist/assets/index-DHKd4rcV.js +0 -338
  46. package/dist/assets/index-SyyA3z3U.css +0 -1
  47. package/dist/assets/swarmSimulation-DHDqjfMa.js +0 -1
@@ -15,9 +15,44 @@
15
15
  * ai_translator — converts unstructured input → normalized events
16
16
  * ai_analyst — generates governed reports from traces
17
17
  */
18
// --- TypeScript-emitted CommonJS interop helpers (tsc output; do not hand-edit) ---
// Creates a re-export binding on `o` for property `k` of module `m`,
// preserving "liveness" via a getter where possible.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
// Re-wrap as an accessor unless the source already exposes a safe getter.
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
// Legacy fallback without Object.create: plain copy (loses liveness).
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
// Attaches a CommonJS module object as the `default` export of a namespace object.
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
// Emulates `import * as ns from "mod"` over a CommonJS module: copies every
// own key except "default" onto a fresh namespace object, then sets `default`.
var __importStar = (this && this.__importStar) || (function () {
// Lazily resolves the key-enumeration strategy on first call.
var ownKeys = function(o) {
ownKeys = Object.getOwnPropertyNames || function (o) {
var ar = [];
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
return ar;
};
return ownKeys(o);
};
return function (mod) {
// Native ES modules pass through untouched.
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
__setModuleDefault(result, mod);
return result;
};
})();
18
51
  Object.defineProperty(exports, "__esModule", { value: true });
19
- exports.DeterministicProvider = exports.AI_ROLES = void 0;
52
+ exports.OpenAICompatibleProvider = exports.AnthropicProvider = exports.DeterministicProvider = exports.AI_ROLES = void 0;
20
53
  exports.generateDeterministicReport = generateDeterministicReport;
54
+ exports.generateAIReaction = generateAIReaction;
55
+ exports.getDefaultProviderName = getDefaultProviderName;
21
56
  exports.registerAIProvider = registerAIProvider;
22
57
  exports.getAIProvider = getAIProvider;
23
58
  exports.listAIProviders = listAIProviders;
@@ -37,13 +72,21 @@ exports.AI_ROLES = [
37
72
  {
38
73
  id: "ai_analyst",
39
74
  type: "ai",
40
- permissions: ["generate_report", "summarize_trace"],
75
+ permissions: [
76
+ "generate_report",
77
+ "summarize_trace",
78
+ "suggest_experiments",
79
+ "detect_archetype",
80
+ "synthesize_strategy",
81
+ "compare_experiments",
82
+ ],
41
83
  constraints: [
42
84
  "must_reference_trace",
43
85
  "must_include_blocked_actions",
44
86
  "must_include_metrics",
45
87
  "no_unverifiable_claims",
46
88
  "no_speculation_without_flag",
89
+ "must_include_confidence",
47
90
  ],
48
91
  },
49
92
  ];
@@ -186,12 +229,394 @@ function generateDeterministicReport(trace) {
186
229
  };
187
230
  }
188
231
// ============================================
// BUILT-IN PROVIDER: ANTHROPIC (CLAUDE)
// ============================================
/**
 * Anthropic Claude provider — real AI reasoning for NeuroverseOS.
 *
 * Requires ANTHROPIC_API_KEY (or an explicit `apiKey` option). The
 * @anthropic-ai/sdk package is loaded lazily on first use, so the
 * package installs and runs without it until this provider is exercised.
 */
class AnthropicProvider {
    name = "anthropic";
    client = null;
    model;
    _apiKey;
    /**
     * @param {{ model?: string, apiKey?: string }} [options]
     * @throws {Error} when no API key is available from options or env
     */
    constructor(options) {
        this.model = options?.model ?? "claude-sonnet-4-20250514";
        const resolvedKey = options?.apiKey ?? process.env.ANTHROPIC_API_KEY;
        if (!resolvedKey) {
            throw new Error("AnthropicProvider requires ANTHROPIC_API_KEY environment variable or apiKey option");
        }
        this._apiKey = resolvedKey;
    }
    /** Lazily load the SDK and memoize a configured client instance. */
    async getClient() {
        if (this.client) {
            return this.client;
        }
        try {
            const sdk = await Promise.resolve().then(() => __importStar(require("@anthropic-ai/sdk")));
            const Anthropic = sdk.default ?? sdk.Anthropic;
            this.client = new Anthropic({ apiKey: this._apiKey });
        }
        catch {
            throw new Error("Failed to load @anthropic-ai/sdk. Install it: npm install @anthropic-ai/sdk");
        }
        return this.client;
    }
    /** Translate unstructured input into normalized simulation events. */
    async translate(input) {
        const anthropic = await this.getClient();
        const reply = await anthropic.messages.create({
            model: this.model,
            max_tokens: 1024,
            system: TRANSLATE_SYSTEM_PROMPT,
            messages: [{ role: "user", content: input }],
        });
        const firstBlock = reply.content[0];
        const text = firstBlock?.type === "text" ? firstBlock.text : "[]";
        return safeParseEventArray(text);
    }
    /**
     * Generate a governed report from a simulation trace.
     * Falls back to the deterministic report when the model's
     * reply is not valid JSON.
     */
    async summarize(trace) {
        const anthropic = await this.getClient();
        const traceJson = buildTraceSummaryInput(trace);
        const reply = await anthropic.messages.create({
            model: this.model,
            max_tokens: 2048,
            system: SUMMARIZE_SYSTEM_PROMPT,
            messages: [{ role: "user", content: `Analyze this simulation trace:\n${traceJson}` }],
        });
        const firstBlock = reply.content[0];
        const text = firstBlock?.type === "text" ? firstBlock.text : "{}";
        try {
            return JSON.parse(text);
        }
        catch {
            return generateDeterministicReport(trace);
        }
    }
}
exports.AnthropicProvider = AnthropicProvider;
297
// ============================================
// BUILT-IN PROVIDER: OPENAI-COMPATIBLE
// ============================================
/**
 * OpenAI-compatible provider — works with ANY API that speaks the
 * OpenAI chat completions format:
 *
 * - OpenAI (GPT-4, GPT-4o, o1, etc.)
 * - Local LLMs: Ollama, LM Studio, vLLM, llama.cpp, LocalAI
 * - Cloud APIs: Groq, Together, Fireworks, Mistral, Deepseek, Perplexity
 * - Any custom endpoint that implements POST /v1/chat/completions
 *
 * Configuration via env vars:
 *   OPENAI_API_KEY  — API key (set to "none" or "local" for local models)
 *   OPENAI_BASE_URL — Custom base URL (default: https://api.openai.com/v1)
 *   OPENAI_MODEL    — Model name (default: gpt-4o)
 *
 * Or via constructor options for full control.
 */
class OpenAICompatibleProvider {
    name;
    model;
    baseUrl;
    apiKey;
    headers;
    /**
     * @param {{ name?: string, model?: string, apiKey?: string,
     *           baseUrl?: string, headers?: object }} [options]
     */
    constructor(options) {
        const opts = options ?? {};
        this.name = opts.name ?? "openai";
        this.model = opts.model ?? process.env.OPENAI_MODEL ?? "gpt-4o";
        this.apiKey = opts.apiKey ?? process.env.OPENAI_API_KEY ?? "";
        const rawBase = opts.baseUrl ?? process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1";
        this.baseUrl = rawBase.replace(/\/+$/, ""); // normalize: no trailing slashes
        this.headers = opts.headers ?? {};
    }
    /**
     * Low-level POST /chat/completions call shared by translate/summarize.
     * @returns {Promise<string>} the assistant message content ("" if absent)
     * @throws {Error} on any non-2xx response (first 200 chars of body included)
     */
    async chatCompletion(systemPrompt, userMessage, maxTokens) {
        const requestHeaders = {
            "Content-Type": "application/json",
            ...this.headers,
        };
        // Placeholder keys ("", "none", "local") mean: send no Authorization header.
        const hasRealKey = Boolean(this.apiKey) && this.apiKey !== "none" && this.apiKey !== "local";
        if (hasRealKey) {
            requestHeaders["Authorization"] = `Bearer ${this.apiKey}`;
        }
        const payload = {
            model: this.model,
            max_tokens: maxTokens,
            temperature: 0.7,
            messages: [
                { role: "system", content: systemPrompt },
                { role: "user", content: userMessage },
            ],
        };
        const response = await fetch(`${this.baseUrl}/chat/completions`, {
            method: "POST",
            headers: requestHeaders,
            body: JSON.stringify(payload),
        });
        if (!response.ok) {
            const errText = await response.text().catch(() => "");
            throw new Error(`${this.name} API error ${response.status}: ${errText.slice(0, 200)}`);
        }
        const json = await response.json();
        return json.choices?.[0]?.message?.content ?? "";
    }
    /** Translate unstructured input into normalized simulation events. */
    async translate(input) {
        const raw = await this.chatCompletion(TRANSLATE_SYSTEM_PROMPT, input, 1024);
        return safeParseEventArray(raw);
    }
    /**
     * Generate a governed report from a simulation trace.
     * Falls back to the deterministic report on unparseable replies.
     */
    async summarize(trace) {
        const traceJson = buildTraceSummaryInput(trace);
        const raw = await this.chatCompletion(SUMMARIZE_SYSTEM_PROMPT, `Analyze this simulation trace:\n${traceJson}`, 2048);
        try {
            return JSON.parse(raw);
        }
        catch {
            return generateDeterministicReport(trace);
        }
    }
}
exports.OpenAICompatibleProvider = OpenAICompatibleProvider;
373
// ============================================
// SHARED PROMPTS & HELPERS
// ============================================
// System prompt for provider.translate(): instructs the model to emit a raw
// JSON array of normalized event objects (no markdown fences, no prose).
// safeParseEventArray() is the tolerant consumer of this output.
const TRANSLATE_SYSTEM_PROMPT = `You are an event normalizer for NeuroverseOS, a governance simulation platform.
Given unstructured input, extract structured events. Return ONLY valid JSON — an array of objects with these fields:
- type (string): event category (market_shock, policy_change, agent_action, etc.)
- agent (string, optional): who caused it
- action (string): what happened
- impact (number, -1 to 1): negative to positive impact
- confidence (number, 0 to 1): how confident you are
- severity (number, 0 to 1): how severe
- sector (string, optional): affected domain
No markdown, no explanation — just the JSON array.`;
// System prompt for provider.summarize(): requests a report as a single JSON
// object. NOTE(review): presumably this schema mirrors the shape returned by
// generateDeterministicReport (the fallback path) — confirm against that function.
const SUMMARIZE_SYSTEM_PROMPT = `You are a governance analyst for NeuroverseOS. Analyze simulation trace data and produce a structured report.
Return ONLY valid JSON matching this schema:
{
"executiveSummary": "string — 2-3 sentence overview",
"keyEvents": [{"round": number, "description": "string", "agents": ["string"], "severity": "low|moderate|high|critical"}],
"governanceActions": [{"type": "BLOCK|PAUSE|CIRCUIT_BREAKER|REBALANCE", "agent": "string", "description": "string"}],
"metrics": {"volatilityReduction": "string", "collapseRisk": "string", "stabilityScore": "string", "interventionCount": number, "blockedActions": number},
"counterfactual": "string — what would have happened without governance",
"observations": ["string — insight about agent behavior under governance"],
"generatedBy": "ai",
"governedBy": "NeuroverseOS"
}
Focus on WHAT AGENTS DID DIFFERENTLY under governance — behavioral shifts matter more than raw block counts.`;
// System prompt for generateAIReaction(): one in-character stakeholder reply
// per call, parsed by parseReactionResponse().
const AGENT_REACTION_SYSTEM_PROMPT = `You are a stakeholder in a NeuroverseOS governance simulation.
You will be given your identity, the scenario, and the current state.
React in character. Return ONLY valid JSON:
{
"reaction": "1-2 sentence description of your reaction and reasoning",
"impact": <number from -1 (strongly negative) to 1 (strongly positive)>,
"confidence": <number from 0 to 1 — how sure you are>,
"trigger": "brief label for what drove this reaction"
}`;
408
/**
 * Parse an LLM response expected to contain a JSON array of events.
 *
 * Tolerates two common failure modes of real models:
 *  - output wrapped in markdown code fences (```json ... ```) despite the
 *    "no markdown" instruction in TRANSLATE_SYSTEM_PROMPT — fences are
 *    stripped before parsing;
 *  - a single JSON object instead of an array — wrapped in a one-element array.
 *
 * @param {string} text - raw model output
 * @returns {Array<object>} parsed events, or a single low-confidence
 *   "ai_translation" placeholder event carrying the raw text when the
 *   response is not valid JSON at all.
 */
function safeParseEventArray(text) {
    // Strip optional markdown fences: ```json\n...\n``` or ```\n...\n```
    const cleaned = String(text ?? "")
        .trim()
        .replace(/^```[a-zA-Z]*\s*/, "")
        .replace(/\s*```$/, "");
    try {
        const parsed = JSON.parse(cleaned);
        return Array.isArray(parsed) ? parsed : [parsed];
    }
    catch {
        // Not JSON — return a neutral placeholder so the pipeline keeps moving;
        // the untouched raw text is preserved for debugging.
        return [{
                type: "ai_translation",
                action: "parsed_input",
                impact: 0,
                confidence: 0.5,
                metadata: { raw: text, source: "ai" },
            }];
    }
}
423
/**
 * Compact a simulation trace into a token-budget-friendly JSON string for
 * the summarize() prompts: headline numbers plus only the first 20
 * interventions and the first 3 rounds as a representative sample.
 *
 * Defensive about partially-populated traces: a missing `rounds`,
 * `interventions`, or per-round `reactions` array is treated as empty
 * instead of throwing.
 *
 * @param {object} trace - simulation trace (scenario, rounds, metrics,
 *   governanceStats, interventions)
 * @returns {string} JSON string suitable for an LLM user message
 */
function buildTraceSummaryInput(trace) {
    const rounds = trace.rounds ?? [];
    const interventions = trace.interventions ?? [];
    return JSON.stringify({
        scenario: trace.scenario,
        roundCount: rounds.length,
        totalReactions: rounds.reduce((s, r) => s + (r.reactions?.length ?? 0), 0),
        metrics: trace.metrics,
        governanceStats: trace.governanceStats,
        interventions: interventions.slice(0, 20), // cap to keep the prompt small
        sampleRounds: rounds.slice(0, 3),
    });
}
434
/**
 * Generate an AI-powered stakeholder reaction.
 *
 * Works with ANY provider — Anthropic, OpenAI, local LLM, etc.
 * Each agent gets an LLM call to decide how they'd actually react.
 *
 * @param {object} options - stakeholder identity fields, scenario/path
 *   context, round index, optional `previousReactions`, and an optional
 *   `provider` override (defaults to the auto-detected provider).
 * @returns {Promise<{reaction: string, impact: number, confidence: number, trigger: string}>}
 *   Never rejects: provider/API errors are swallowed and a neutral
 *   fallback reaction (impact 0, confidence 0.3) is returned instead.
 */
async function generateAIReaction(options) {
    const provider = options.provider ?? getAIProvider(getDefaultProviderName());
    // No AI configured — short-circuit with a fixed neutral reaction.
    if (provider.name === "deterministic") {
        return {
            reaction: `${options.stakeholderId} evaluates the situation (no AI provider configured)`,
            impact: 0,
            confidence: 0.3,
            trigger: "No AI provider — deterministic fallback",
        };
    }
    // NOTE: the exact wording/layout of this prompt is part of the model
    // contract paired with AGENT_REACTION_SYSTEM_PROMPT — edit with care.
    const userPrompt = `You are ${options.stakeholderId}${options.stakeholderDescription ? ` (${options.stakeholderDescription})` : ""}.
Disposition: ${options.stakeholderDisposition ?? "unknown"}
Priorities: ${options.stakeholderPriorities?.join(", ") ?? "not specified"}

SCENARIO: ${options.scenario}
CURRENT PATH: ${options.pathDescription}
RISK LEVEL: ${options.pathRisk}
ROUND: ${options.round + 1}
${options.previousReactions?.length ? `\nPREVIOUS REACTIONS THIS ROUND:\n${options.previousReactions.join("\n")}` : ""}

How do you react?`;
    try {
        // Route through the appropriate provider
        if (provider instanceof AnthropicProvider) {
            // Anthropic: dedicated messages API with a system prompt.
            const client = await provider.getClient();
            const response = await client.messages.create({
                model: provider.model,
                max_tokens: 256,
                system: AGENT_REACTION_SYSTEM_PROMPT,
                messages: [{ role: "user", content: userPrompt }],
            });
            const text = response.content[0]?.type === "text" ? response.content[0].text : "{}";
            return parseReactionResponse(text, options.stakeholderId);
        }
        else if (provider instanceof OpenAICompatibleProvider) {
            // Use the OpenAI-compatible chat completions directly
            const text = await provider.chatCompletion(AGENT_REACTION_SYSTEM_PROMPT, userPrompt, 256);
            return parseReactionResponse(text, options.stakeholderId);
        }
        else {
            // Generic provider — use translate as a proxy
            const events = await provider.translate(`Stakeholder ${options.stakeholderId} reacts to: ${options.pathDescription} (risk: ${options.pathRisk})`);
            // Only the first translated event is used to shape the reaction.
            const event = events[0];
            return {
                reaction: `${options.stakeholderId}: ${event?.action ?? "no reaction"}`,
                impact: event?.impact ?? 0,
                confidence: event?.confidence ?? 0.5,
                trigger: event?.type ?? "provider-generated",
            };
        }
    }
    catch {
        // Deliberate best-effort: any API failure degrades to a neutral
        // reaction rather than aborting the simulation round.
        return {
            reaction: `${options.stakeholderId} considers the situation (AI call failed, using fallback)`,
            impact: 0,
            confidence: 0.3,
            trigger: "AI fallback — API error",
        };
    }
}
500
/**
 * Parse an LLM reaction reply (expected JSON) into a normalized reaction
 * object, clamping impact to [-1, 1] and confidence to [0, 1].
 *
 * Fix: a reported confidence of 0 is a legitimate value per the schema in
 * AGENT_REACTION_SYSTEM_PROMPT ("number from 0 to 1"); the previous
 * `Number(x) || 0.5` coerced it to the 0.5 default. The default now applies
 * only when the field is missing or non-numeric.
 *
 * @param {string} text - raw model output
 * @param {string} stakeholderId - used in fallback copy when parsing fails
 * @returns {{reaction: string, impact: number, confidence: number, trigger: string}}
 */
function parseReactionResponse(text, stakeholderId) {
    try {
        const parsed = JSON.parse(text);
        const rawImpact = Number(parsed.impact);
        const rawConfidence = Number(parsed.confidence);
        return {
            reaction: String(parsed.reaction ?? `${stakeholderId} reacts`),
            impact: Math.max(-1, Math.min(1, Number.isFinite(rawImpact) ? rawImpact : 0)),
            // Default only when missing/non-numeric — an explicit 0 survives.
            confidence: Math.max(0, Math.min(1, Number.isFinite(rawConfidence) ? rawConfidence : 0.5)),
            trigger: String(parsed.trigger ?? "AI-generated reaction"),
        };
    }
    catch {
        // Non-JSON reply: surface a truncated excerpt rather than losing it.
        return {
            reaction: text?.slice(0, 200) || `${stakeholderId} reacts`,
            impact: 0,
            confidence: 0.3,
            trigger: "AI response (unparsed)",
        };
    }
}
519
// ============================================
// PROVIDER REGISTRY
// ============================================
/** Registry of available AI providers (name → zero-arg factory) */
const providerRegistry = new Map();
// Factories are thunks: env vars and API keys are read when a provider is
// looked up, not at module load — so registration itself never throws even
// when a provider's key is absent.
/** Register the deterministic (no-AI) provider by default */
providerRegistry.set("deterministic", () => new DeterministicProvider());
/** Register the Anthropic (Claude) provider — activates when ANTHROPIC_API_KEY is set */
// NOTE: the AnthropicProvider constructor throws if no key is configured
// when this factory is invoked.
providerRegistry.set("anthropic", () => new AnthropicProvider());
/**
 * Register the OpenAI-compatible provider.
 * Covers: OpenAI, Groq, Together, Fireworks, Mistral, Deepseek, etc.
 */
providerRegistry.set("openai", () => new OpenAICompatibleProvider());
/**
 * Register local LLM provider — Ollama, LM Studio, vLLM, llama.cpp, LocalAI.
 * These use the OpenAI-compatible API on localhost.
 *
 * Env vars:
 *   LOCAL_LLM_URL   — e.g. http://localhost:11434/v1 (Ollama)
 *   LOCAL_LLM_MODEL — e.g. llama3, mistral, codellama
 */
providerRegistry.set("local", () => new OpenAICompatibleProvider({
    name: "local",
    baseUrl: process.env.LOCAL_LLM_URL ?? "http://localhost:11434/v1",
    model: process.env.LOCAL_LLM_MODEL ?? "llama3",
    apiKey: "local", // sentinel: suppresses the Authorization header
}));
// Convenience aliases for popular providers — each pins the vendor base URL
// and reads its model/key from vendor-specific env vars at lookup time.
providerRegistry.set("ollama", () => new OpenAICompatibleProvider({
    name: "ollama",
    baseUrl: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434/v1",
    model: process.env.OLLAMA_MODEL ?? "llama3",
    apiKey: "local",
}));
providerRegistry.set("groq", () => new OpenAICompatibleProvider({
    name: "groq",
    baseUrl: "https://api.groq.com/openai/v1",
    model: process.env.GROQ_MODEL ?? "llama-3.3-70b-versatile",
    apiKey: process.env.GROQ_API_KEY ?? "",
}));
providerRegistry.set("together", () => new OpenAICompatibleProvider({
    name: "together",
    baseUrl: "https://api.together.xyz/v1",
    model: process.env.TOGETHER_MODEL ?? "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    apiKey: process.env.TOGETHER_API_KEY ?? "",
}));
providerRegistry.set("mistral", () => new OpenAICompatibleProvider({
    name: "mistral",
    baseUrl: "https://api.mistral.ai/v1",
    model: process.env.MISTRAL_MODEL ?? "mistral-large-latest",
    apiKey: process.env.MISTRAL_API_KEY ?? "",
}));
providerRegistry.set("deepseek", () => new OpenAICompatibleProvider({
    name: "deepseek",
    baseUrl: "https://api.deepseek.com/v1",
    model: process.env.DEEPSEEK_MODEL ?? "deepseek-chat",
    apiKey: process.env.DEEPSEEK_API_KEY ?? "",
}));
providerRegistry.set("fireworks", () => new OpenAICompatibleProvider({
    name: "fireworks",
    baseUrl: "https://api.fireworks.ai/inference/v1",
    model: process.env.FIREWORKS_MODEL ?? "accounts/fireworks/models/llama-v3p1-70b-instruct",
    apiKey: process.env.FIREWORKS_API_KEY ?? "",
}));
584
/**
 * Auto-detect the best available provider based on environment variables.
 *
 * Checks, in priority order:
 *   ANTHROPIC_API_KEY → "anthropic",  OPENAI_API_KEY → "openai",
 *   GROQ_API_KEY → "groq",            TOGETHER_API_KEY → "together",
 *   MISTRAL_API_KEY → "mistral",      DEEPSEEK_API_KEY → "deepseek",
 *   FIREWORKS_API_KEY → "fireworks",  LOCAL_LLM_URL → "local",
 *   OLLAMA_BASE_URL → "ollama".
 * With none of these set, falls back to "deterministic" (no AI).
 *
 * @returns {string} registry key of the detected provider
 */
function getDefaultProviderName() {
    // (env var, provider name) pairs, highest priority first.
    const detectionOrder = [
        ["ANTHROPIC_API_KEY", "anthropic"],
        ["OPENAI_API_KEY", "openai"],
        ["GROQ_API_KEY", "groq"],
        ["TOGETHER_API_KEY", "together"],
        ["MISTRAL_API_KEY", "mistral"],
        ["DEEPSEEK_API_KEY", "deepseek"],
        ["FIREWORKS_API_KEY", "fireworks"],
        ["LOCAL_LLM_URL", "local"],
        ["OLLAMA_BASE_URL", "ollama"],
    ];
    for (const [envVar, providerName] of detectionOrder) {
        if (process.env[envVar]) {
            return providerName;
        }
    }
    return "deterministic";
}
195
620
  /** Register a custom AI provider */
196
621
  function registerAIProvider(name, factory) {
197
622
  providerRegistry.set(name, factory);