@neuroverseos/governance 0.2.2 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. package/.well-known/ai-plugin.json +26 -0
  2. package/.well-known/mcp.json +68 -0
  3. package/AGENTS.md +219 -0
  4. package/README.md +64 -4
  5. package/dist/adapters/autoresearch.cjs +196 -0
  6. package/dist/adapters/autoresearch.d.cts +103 -0
  7. package/dist/adapters/autoresearch.d.ts +103 -0
  8. package/dist/adapters/autoresearch.js +7 -0
  9. package/dist/adapters/express.d.cts +1 -1
  10. package/dist/adapters/express.d.ts +1 -1
  11. package/dist/adapters/express.js +1 -1
  12. package/dist/adapters/index.cjs +171 -0
  13. package/dist/adapters/index.d.cts +2 -1
  14. package/dist/adapters/index.d.ts +2 -1
  15. package/dist/adapters/index.js +8 -4
  16. package/dist/adapters/langchain.d.cts +1 -1
  17. package/dist/adapters/langchain.d.ts +1 -1
  18. package/dist/adapters/langchain.js +2 -2
  19. package/dist/adapters/openai.d.cts +1 -1
  20. package/dist/adapters/openai.d.ts +1 -1
  21. package/dist/adapters/openai.js +2 -2
  22. package/dist/adapters/openclaw.d.cts +1 -1
  23. package/dist/adapters/openclaw.d.ts +1 -1
  24. package/dist/adapters/openclaw.js +2 -2
  25. package/dist/chunk-T5EUJQE5.js +172 -0
  26. package/dist/cli/neuroverse.cjs +1157 -184
  27. package/dist/cli/neuroverse.js +18 -6
  28. package/dist/cli/run.js +2 -2
  29. package/dist/{doctor-QV6HELS5.js → doctor-XPDLEYXN.js} +1 -0
  30. package/dist/{guard-contract-Cm91Kp4j.d.cts → guard-contract-WZx__PmU.d.cts} +1 -1
  31. package/dist/{guard-contract-Cm91Kp4j.d.ts → guard-contract-WZx__PmU.d.ts} +1 -1
  32. package/dist/index.d.cts +2 -2
  33. package/dist/index.d.ts +2 -2
  34. package/dist/index.js +28 -28
  35. package/dist/infer-world-7GVZWFX4.js +543 -0
  36. package/dist/init-world-VWMQZQC7.js +223 -0
  37. package/dist/{mcp-server-LZVJHBT5.js → mcp-server-FPVSU32Z.js} +2 -2
  38. package/dist/{session-VISISNWJ.js → session-EKTRSR7C.js} +2 -2
  39. package/dist/worlds/autoresearch.nv-world.md +230 -0
  40. package/llms.txt +79 -0
  41. package/openapi.yaml +230 -0
  42. package/package.json +15 -4
  43. package/dist/{chunk-SKU3GAPD.js → chunk-2PQU3VAN.js} +3 -3
  44. package/dist/{chunk-KEST3MWO.js → chunk-4A7LISES.js} +3 -3
  45. package/dist/{chunk-RWXVAH6P.js → chunk-COT5XS4V.js} +3 -3
  46. package/dist/{chunk-OHAC6HJE.js → chunk-ER62HNGF.js} +3 -3
  47. package/dist/{chunk-DPVS43ZT.js → chunk-OGL7QXZS.js} +3 -3
  48. package/dist/{guard-GFLQZY6U.js → guard-RV65TT4L.js} +1 -1
  49. package/dist/{playground-FGOMASHN.js → playground-E664U4T6.js} +1 -1
  50. package/dist/{redteam-SK7AMIG3.js → redteam-Z7WREJ44.js} +1 -1
  51. package/dist/{test-75AVHC3R.js → test-OGXJK4QU.js} +1 -1
@@ -0,0 +1,543 @@
1
+ import "./chunk-YZFATT7X.js";
2
+
3
+ // src/cli/infer-world.ts
4
+ import { existsSync, readFileSync, readdirSync, statSync } from "fs";
5
+ import { writeFile } from "fs/promises";
6
+ import { join, basename, resolve } from "path";
7
// Filenames that hint at what a repository is for, each mapped to a
// human-readable signal plus a confidence weight consumed later by
// classifyEnvironment. Keys are matched case-insensitively first, then
// exactly ("Makefile"/"Dockerfile" rely on the exact-case pass).
var SCAN_PATTERNS = {
  "program.md": { signal: "experiment goals / agent instructions", confidence: 0.9 },
  "train.py": { signal: "model training code", confidence: 0.8 },
  "train.js": { signal: "model training code", confidence: 0.8 },
  "train.ts": { signal: "model training code", confidence: 0.8 },
  "prepare.py": { signal: "data preparation", confidence: 0.7 },
  "dataset.yaml": { signal: "dataset configuration", confidence: 0.8 },
  "dataset.json": { signal: "dataset configuration", confidence: 0.8 },
  "config.yaml": { signal: "project configuration", confidence: 0.5 },
  "config.json": { signal: "project configuration", confidence: 0.5 },
  "pyproject.toml": { signal: "Python project", confidence: 0.4 },
  "requirements.txt": { signal: "Python dependencies", confidence: 0.5 },
  "setup.py": { signal: "Python package", confidence: 0.4 },
  "Makefile": { signal: "build automation", confidence: 0.3 },
  "docker-compose.yml": { signal: "containerized environment", confidence: 0.4 },
  "Dockerfile": { signal: "containerized environment", confidence: 0.4 }
};

// Directory names that commonly appear in ML research repositories.
var RESEARCH_DIRECTORIES = ["experiments", "results", "notebooks", "models", "checkpoints", "logs", "data", "eval"];

/**
 * Shallow-scans the top level of `repoPath` and collects detection signals.
 *
 * @param {string} repoPath - Directory to scan (not recursed into).
 * @returns {Array<{path: string, signal: string, confidence: number}>}
 *   One entry per recognized file/directory; empty array when the
 *   directory cannot be read.
 */
function scanRepo(repoPath) {
  const signals = [];
  let entries;
  try {
    entries = readdirSync(repoPath);
  } catch {
    // Unreadable or nonexistent directory: report no signals rather than throw.
    return signals;
  }
  // Own-property check (not plain truthiness): a file named "constructor"
  // or "toString" must not match inherited Object.prototype members,
  // which previously produced { signal: undefined } entries.
  const patternFor = (key) =>
    Object.prototype.hasOwnProperty.call(SCAN_PATTERNS, key) ? SCAN_PATTERNS[key] : null;
  for (const entry of entries) {
    const lower = entry.toLowerCase();
    const fullPath = join(repoPath, entry);
    const lowerHit = patternFor(lower);
    if (lowerHit) {
      signals.push({ path: entry, signal: lowerHit.signal, confidence: lowerHit.confidence });
    }
    // Exact-case pass for keys like "Makefile"/"Dockerfile" whose
    // lowercase form is not itself a key (skipped when identical to
    // avoid double-reporting).
    if (entry !== lower) {
      const exactHit = patternFor(entry);
      if (exactHit) {
        signals.push({ path: entry, signal: exactHit.signal, confidence: exactHit.confidence });
      }
    }
    if (lower === "readme.md" || lower === "readme.rst" || lower === "readme.txt") {
      signals.push({ path: entry, signal: "documentation", confidence: 0.3 });
    }
    if (lower === "package.json" || lower === "cargo.toml" || lower === "go.mod") {
      signals.push({ path: entry, signal: "project manifest", confidence: 0.4 });
    }
    try {
      if (statSync(fullPath).isDirectory() && RESEARCH_DIRECTORIES.includes(lower)) {
        signals.push({
          path: entry + "/",
          signal: `${lower} directory`,
          confidence: 0.6
        });
      }
    } catch {
      // Broken symlink or concurrent deletion: skip the directory check.
    }
  }
  return signals;
}
69
// Dataset mentions, checked in order — the first pattern that matches wins.
// Patterns 1-3 capture an explicit name; the rest match well-known corpora.
var DATASET_PATTERNS = [
  /dataset\s*[:=]\s*["']?([A-Za-z0-9_\-]+)/i,
  /--dataset\s+["']?([A-Za-z0-9_\-]+)/i,
  /load_dataset\(["']([^"']+)/i,
  /TinyStories/i,
  /OpenWebText/i,
  /WikiText/i,
  /C4/i,
  /The\s*Pile/i,
  /MNIST/i,
  /CIFAR/i,
  /ImageNet/i
];

// Evaluation-metric mentions, checked in order — first match wins.
var METRIC_PATTERNS = [
  /val_bpb/i,
  /val_loss/i,
  /val_acc(?:uracy)?/i,
  /test_acc(?:uracy)?/i,
  /perplexity/i,
  /bleu/i,
  /rouge/i,
  /f1[_\s]?score/i,
  /auc/i,
  /mse/i,
  /rmse/i,
  /mae/i
];

// Model-architecture mentions; every match is collected (deduplicated).
var ARCHITECTURE_PATTERNS = [
  /transformer/i,
  /RWKV/i,
  /SSM/i,
  /Mamba/i,
  /linear\s*attention/i,
  /GPT/i,
  /BERT/i,
  /ResNet/i,
  /CNN/i,
  /RNN/i,
  /LSTM/i,
  /GRU/i,
  /ViT/i,
  /diffusion/i,
  /autoencoder/i,
  /GAN/i
];

// [pattern, framework label] pairs, checked in order — first match wins.
// The final bare /torch/i entry is a catch-all behind the stricter imports.
var FRAMEWORK_PATTERNS = [
  [/import\s+torch|from\s+torch/i, "PyTorch"],
  [/import\s+tensorflow|from\s+tensorflow/i, "TensorFlow"],
  [/import\s+jax|from\s+jax/i, "JAX"],
  [/from\s+transformers\s+import/i, "HuggingFace Transformers"],
  [/import\s+keras|from\s+keras/i, "Keras"],
  [/torch/i, "PyTorch"]
];

/**
 * Reads a text file, truncating oversized files to roughly `maxBytes`.
 * (The cap is applied to the decoded string, so it is a character count,
 * not a strict byte count.)
 *
 * @param {string} path - File to read.
 * @param {number} [maxBytes] - Truncation threshold (default 50 000).
 * @returns {string|null} File contents, or null when unreadable.
 */
function readFileContent(path, maxBytes = 5e4) {
  try {
    const oversized = statSync(path).size > maxBytes;
    const text = readFileSync(path, "utf-8");
    return oversized ? text.slice(0, maxBytes) : text;
  } catch {
    return null;
  }
}

/**
 * Mines well-known project files in `repoPath` for dataset, metric,
 * architecture, framework, and goal signals.
 *
 * @param {string} repoPath - Repository root.
 * @param {Array} signals - Signals from scanRepo (accepted for interface
 *   compatibility; not consulted by this function).
 * @returns {object} Partial environment: architectures/constraints/goals
 *   arrays plus optional dataset, metric, optimization, framework,
 *   hasProgram, hasExperimentLoop.
 */
function extractFromContent(repoPath, signals) {
  const result = {
    architectures: [],
    constraints: [],
    goals: []
  };
  // Files worth mining, in priority order; missing or empty files are skipped.
  const candidateFiles = [
    "program.md",
    "train.py",
    "train.js",
    "train.ts",
    "prepare.py",
    "config.yaml",
    "config.json",
    "dataset.yaml",
    "dataset.json",
    "README.md",
    "readme.md"
  ];
  const combined = candidateFiles
    .map((name) => readFileContent(join(repoPath, name)))
    .filter(Boolean)
    .join("\n");
  if (!combined) return result;

  // Returns the first successful match from an ordered pattern list.
  const firstMatch = (patterns) => {
    for (const pattern of patterns) {
      const found = combined.match(pattern);
      if (found) return found;
    }
    return null;
  };

  const datasetMatch = firstMatch(DATASET_PATTERNS);
  if (datasetMatch) {
    // Capture group when the pattern has one, otherwise the literal hit.
    result.dataset = datasetMatch[1] || datasetMatch[0].trim();
  }

  const metricMatch = firstMatch(METRIC_PATTERNS);
  if (metricMatch) {
    result.metric = metricMatch[0].trim().toLowerCase().replace(/\s+/g, "_");
    // Loss-like metrics are minimized; everything else is maximized.
    const lossLike = /loss|bpb|perplexity|mse|rmse|mae|error/i.test(result.metric);
    result.optimization = lossLike ? "minimize" : "maximize";
  }

  const seenArchitectures = new Set();
  for (const pattern of ARCHITECTURE_PATTERNS) {
    const found = combined.match(pattern);
    if (found) seenArchitectures.add(found[0].trim());
  }
  result.architectures = [...seenArchitectures];

  for (const [pattern, framework] of FRAMEWORK_PATTERNS) {
    if (pattern.test(combined)) {
      result.framework = framework;
      break;
    }
  }

  // Bullet lines in program.md become candidate research goals.
  const programContent = readFileContent(join(repoPath, "program.md"));
  if (programContent) {
    result.hasProgram = true;
    for (const rawLine of programContent.split("\n")) {
      const trimmed = rawLine.trim();
      const isBullet = trimmed.startsWith("- ") || trimmed.startsWith("* ");
      if (isBullet && trimmed.length > 10 && trimmed.length < 200) {
        result.goals.push(trimmed.slice(2).trim());
      }
    }
  }

  // An "experiment loop" needs both iteration vocabulary and measurement
  // vocabulary somewhere in the combined content.
  result.hasExperimentLoop =
    /experiment|loop|iteration|epoch|trial|run|sweep/i.test(combined) &&
    /result|metric|eval|score|loss|accuracy/i.test(combined);
  return result;
}
207
/**
 * Classifies a scanned repository as "research", "application", or
 * "unknown" from its file signals and extracted content, and assembles
 * the full environment record.
 *
 * @param {Array<{path: string, signal: string, confidence: number}>} signals
 *   Output of scanRepo.
 * @param {object} extracted - Output of extractFromContent.
 * @returns {object} Environment with type, confidence (0-100), and all
 *   normalized detection fields (missing values become null / []).
 */
function classifyEnvironment(signals, extracted) {
  const hasTrainCode = signals.some((s) => s.signal.includes("training code"));
  const hasExperimentLoop = Boolean(extracted.hasExperimentLoop);
  const researchDirSignals = ["experiments directory", "results directory", "notebooks directory", "models directory"];
  const hasResearchDirs = signals.some((s) => researchDirSignals.includes(s.signal));
  // Weighted evidence score; >= 40 is treated as a research repo.
  const researchScore =
    (hasTrainCode ? 30 : 0) +
    (extracted.dataset ? 20 : 0) +
    (extracted.metric ? 20 : 0) +
    (hasExperimentLoop ? 15 : 0) +
    (hasResearchDirs ? 10 : 0) +
    (extracted.hasProgram ? 15 : 0);
  let type = "unknown";
  let confidence = 0;
  if (researchScore >= 40) {
    type = "research";
    confidence = Math.min(researchScore, 100);
  } else if (signals.length > 0) {
    // Some recognizable structure, but not research-shaped.
    type = "application";
    confidence = 30;
  }
  return {
    type,
    confidence,
    dataset: extracted.dataset || null,
    metric: extracted.metric || null,
    optimization: extracted.optimization || null,
    framework: extracted.framework || null,
    architectures: extracted.architectures || [],
    constraints: extracted.constraints || [],
    hasExperimentLoop,
    hasProgram: !!extracted.hasProgram,
    goals: extracted.goals || [],
    files: signals
  };
}
240
/**
 * Renders a complete `.nv-world.md` governance world document from the
 * detected environment.
 *
 * @param {object} env - Classification result from classifyEnvironment;
 *   reads type, dataset, metric, optimization, framework, architectures,
 *   and goals (missing values fall back to the placeholders below).
 * @param {string} repoName - Basename of the scanned repository; used to
 *   derive the world id and display name.
 * @returns {string} Markdown document: YAML frontmatter plus Thesis,
 *   Invariants, State, Assumptions, Rules, Gates, and Outcomes sections.
 */
function generateWorldFromDetection(env, repoName) {
  // Slugify the repo name into a lowercase snake_case world id.
  const worldId = repoName.toLowerCase().replace(/[^a-z0-9]+/g, "_").replace(/^_|_$/g, "");
  // Placeholder fallbacks for anything detection could not establish.
  const dataset = env.dataset || "UNDETECTED";
  const metric = env.metric || "val_loss";
  const optimization = env.optimization || "minimize";
  const framework = env.framework || "Unknown";
  const architectures = env.architectures.length > 0 ? env.architectures.join(", ") : "not detected";
  const contextDescription = env.type === "research" ? `ML research using ${framework} on ${dataset}` : `AI application with governance requirements`;
  // NOTE(review): `goals` is computed here but never interpolated into the
  // template below — presumably a Goals section was intended; confirm.
  const goals = env.goals.length > 0 ? env.goals.slice(0, 3).map((g) => ` - ${g}`).join("\n") : " - Define research goals here";
  // The template body is intentionally flush-left: every line is emitted
  // verbatim into the generated markdown file.
  return `---
world_id: ${worldId}
name: ${repoName} Research World
version: 1.0.0
runtime_mode: SIMULATION
default_profile: inferred
alternative_profile: strict
---

# Thesis

${contextDescription}. Experiments must be reproducible, metrics must be tracked, and agents must operate within the declared research context. This world was inferred from repository structure \u2014 review and refine the constraints below.

# Invariants

- \`experiments_must_be_reproducible\` \u2014 Every experiment must log architecture, hyperparameters, dataset, and training config sufficient to reproduce results (structural, immutable)
- \`metrics_must_be_recorded\` \u2014 Every training run must produce the primary evaluation metric (${metric}); runs without metrics are invalid (structural, immutable)
- \`dataset_is_${dataset.toLowerCase().replace(/[^a-z0-9]+/g, "_")}\` \u2014 The dataset "${dataset}" must be used for training and evaluation (structural, immutable)
- \`compute_budget_enforced\` \u2014 Experiments must respect declared compute limits (structural, immutable)

# State

## experiments_run
- type: number
- min: 0
- max: 10000
- step: 1
- default: 0
- label: Experiments Run
- description: Total number of experiments completed

## best_metric_value
- type: number
- min: -1000
- max: 1000
- step: 0.01
- default: ${optimization === "minimize" ? "100" : "-1000"}
- label: Best ${metric}
- description: Best value achieved for ${metric}

## keep_rate
- type: number
- min: 0
- max: 100
- step: 1
- default: 0
- label: Keep Rate
- description: Percentage of experiments that improved on the best result

## compute_used_minutes
- type: number
- min: 0
- max: 100000
- step: 1
- default: 0
- label: Compute Used (minutes)
- description: Total wall-clock training time consumed

## compute_budget_minutes
- type: number
- min: 0
- max: 100000
- step: 60
- default: 1440
- label: Compute Budget (minutes)
- description: Maximum allowed compute time

## failed_experiments
- type: number
- min: 0
- max: 10000
- step: 1
- default: 0
- label: Failed Experiments
- description: Number of experiments that failed to produce valid results

# Assumptions

## inferred
- name: Inferred Configuration
- description: Configuration inferred from repository structure. Framework: ${framework}. Architectures: ${architectures}.
- framework: ${framework.toLowerCase().replace(/\s+/g, "_")}
- dataset: ${dataset.toLowerCase()}
- metric: ${metric}

## strict
- name: Strict Configuration
- description: Conservative settings with tight compute limits and strict reproducibility requirements.
- framework: ${framework.toLowerCase().replace(/\s+/g, "_")}
- dataset: ${dataset.toLowerCase()}
- metric: ${metric}

# Rules

## rule-001: Compute Budget Exhausted (structural)
When compute budget is exceeded, the research loop must halt.

When compute_used_minutes > compute_budget_minutes [state]
Then research_viability *= 0.00
Collapse: research_viability < 0.05

> trigger: Compute usage exceeds declared budget.
> rule: Compute budget is a hard constraint. Exceeding it halts all experiments.
> shift: Research loop terminates. Final results are reported.
> effect: Research viability set to zero.

## rule-002: High Failure Rate (degradation)
Too many failed experiments indicate a systemic problem.

When failed_experiments > 5 [state] AND experiments_run > 0 [state]
Then research_viability *= 0.50

> trigger: More than 5 experiments have failed.
> rule: High failure rates waste compute and signal infrastructure problems.
> shift: Research viability degrades. Investigation needed.
> effect: Research viability reduced to 50%.

## rule-003: No Metrics Recorded (structural)
Experiments without metrics are invalid.

When experiments_run > 0 [state] AND best_metric_value == ${optimization === "minimize" ? "100" : "-1000"} [state]
Then research_viability *= 0.30
Collapse: research_viability < 0.05

> trigger: Experiments ran but no metric improvement from default.
> rule: Research without measurement is not research.
> shift: Research viability drops sharply.
> effect: Research viability reduced to 30%.

## rule-004: Strong Progress (advantage)
Consistent metric improvement validates the research approach.

When keep_rate > 20 [state] AND experiments_run > 5 [state]
Then research_viability *= 1.20

> trigger: Keep rate above 20% after 5+ experiments.
> rule: Productive research should be encouraged.
> shift: Research viability improves.
> effect: Research viability boosted by 20%.

# Gates

- BREAKTHROUGH: research_viability >= 90
- PRODUCTIVE: research_viability >= 60
- ONGOING: research_viability >= 35
- STRUGGLING: research_viability > 10
- HALTED: research_viability <= 10

# Outcomes

## research_viability
- type: number
- range: 0-100
- display: percentage
- label: Research Viability
- primary: true

## best_metric_value
- type: number
- range: -1000-1000
- display: decimal
- label: Best ${metric}

## keep_rate
- type: number
- range: 0-100
- display: percentage
- label: Keep Rate
`;
}
419
/**
 * Parses CLI arguments for the infer-world command.
 *
 * The first non-flag token becomes the repo path; `--output` consumes the
 * following token; unknown flags are ignored.
 *
 * @param {string[]} argv - Raw argument list (without node/script).
 * @returns {{repoPath: string, outputPath: string, json: boolean, dryRun: boolean}}
 */
function parseArgs(argv) {
  const parsed = { repoPath: "", outputPath: "", json: false, dryRun: false };
  for (let i = 0; i < argv.length; i++) {
    const token = argv[i];
    switch (token) {
      case "--json":
        parsed.json = true;
        break;
      case "--dry-run":
        parsed.dryRun = true;
        break;
      case "--output":
        // Consume the next token as the output path, if one exists.
        if (i + 1 < argv.length) {
          parsed.outputPath = argv[++i];
        }
        break;
      default:
        // First bare (non-flag) argument is the repository path.
        if (!token.startsWith("-") && !parsed.repoPath) {
          parsed.repoPath = token;
        }
    }
  }
  return parsed;
}
438
/**
 * CLI entry point for `neuroverse infer-world <repo-path>`.
 *
 * Scans a repository, classifies its environment, prints a human-readable
 * report to stderr, and (unless --json or --dry-run) writes an inferred
 * `*.nv-world.md` world file plus a JSON summary on stdout.
 *
 * Exit codes: 0 success (also --json / --dry-run), 1 usage error, missing
 * repo, or output file already exists, 2 no recognizable project
 * structure, 3 unexpected failure.
 *
 * @param {string[]} [argv] - CLI args; defaults to process.argv.slice(2).
 */
async function main(argv = process.argv.slice(2)) {
  try {
    const args = parseArgs(argv);
    // No repo path given: print usage to stderr and exit 1.
    // (The `return` after each process.exit is unreachable in normal Node
    // runs; kept defensively, e.g. if process.exit is stubbed in tests.)
    if (!args.repoPath) {
      process.stderr.write("Usage: neuroverse infer-world <repo-path> [options]\n\n");
      process.stderr.write("Scans an existing repository and generates a governance world file.\n\n");
      process.stderr.write("Options:\n");
      process.stderr.write(" --output <path> Output file path\n");
      process.stderr.write(" --json Output detection results as JSON\n");
      process.stderr.write(" --dry-run Show detections without writing\n");
      process.stderr.write("\nExample:\n");
      process.stderr.write(" neuroverse infer-world ./my-research-repo\n");
      process.exit(1);
      return;
    }
    const repoPath = resolve(args.repoPath);
    if (!existsSync(repoPath)) {
      process.stderr.write(`Repository not found: ${repoPath}
`);
      process.exit(1);
      return;
    }
    process.stderr.write(`Scanning ${repoPath}...
`);
    // Detection pipeline: file signals -> content extraction -> classification.
    const signals = scanRepo(repoPath);
    if (signals.length === 0) {
      process.stderr.write("No recognizable project structure detected.\n");
      process.exit(2);
      return;
    }
    const extracted = extractFromContent(repoPath, signals);
    const env = classifyEnvironment(signals, extracted);
    // --json: emit the raw environment on stdout and stop.
    if (args.json) {
      process.stdout.write(JSON.stringify(env, null, 2) + "\n");
      process.exit(0);
      return;
    }
    // Human-readable detection report (stderr, so stdout stays machine-readable).
    process.stderr.write("\n");
    process.stderr.write(`Detected: ${env.type} environment (${env.confidence}% confidence)
`);
    if (env.dataset) process.stderr.write(` Dataset: ${env.dataset}
`);
    if (env.metric) process.stderr.write(` Metric: ${env.metric} (${env.optimization})
`);
    if (env.framework) process.stderr.write(` Framework: ${env.framework}
`);
    if (env.architectures.length > 0) process.stderr.write(` Architectures: ${env.architectures.join(", ")}
`);
    if (env.hasExperimentLoop) process.stderr.write(` Experiment loop detected
`);
    if (env.hasProgram) process.stderr.write(` Program file found (agent instructions)
`);
    process.stderr.write("\n");
    process.stderr.write(" Files analyzed:\n");
    for (const s of env.files) {
      process.stderr.write(` ${s.path} \u2014 ${s.signal}
`);
    }
    process.stderr.write("\n");
    if (args.dryRun) {
      process.stderr.write("Dry run \u2014 no files written.\n");
      process.exit(0);
      return;
    }
    // Generate the world file; refuse to clobber an existing one.
    const repoName = basename(repoPath);
    const worldContent = generateWorldFromDetection(env, repoName);
    const outputPath = args.outputPath || join(repoPath, "inferred.nv-world.md");
    if (existsSync(outputPath)) {
      process.stderr.write(`File already exists: ${outputPath}
`);
      process.stderr.write("Use --output to specify a different path.\n");
      process.exit(1);
      return;
    }
    await writeFile(outputPath, worldContent, "utf-8");
    process.stderr.write(`\u2713 World created: ${outputPath}

`);
    process.stderr.write("Next steps:\n");
    process.stderr.write(` Review and refine Edit ${outputPath}
`);
    process.stderr.write(` Compile neuroverse bootstrap --input ${outputPath} --output ./world/ --validate
`);
    process.stderr.write(` Simulate neuroverse simulate ${outputPath} --steps 5
`);
    process.stderr.write("\n");
    // Machine-readable summary on stdout mirrors the stderr next steps.
    const result = {
      created: outputPath,
      environment: env,
      nextSteps: [
        `Edit ${outputPath} to refine inferred governance rules`,
        `neuroverse bootstrap --input ${outputPath} --output ./world/ --validate`,
        `neuroverse simulate ${outputPath} --steps 5`
      ]
    };
    process.stdout.write(JSON.stringify(result, null, 2) + "\n");
    process.exit(0);
  } catch (e) {
    // Catch-all: any unexpected failure exits 3 with a short message.
    process.stderr.write(`infer-world failed: ${e instanceof Error ? e.message : e}
`);
    process.exit(3);
  }
}
export {
  main
};