@neuroverseos/governance 0.6.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,90 @@
1
+ import {
2
+ DEFAULT_EVIDENCE_GATE,
3
+ DEFAULT_SIGNAL_EXTRACTORS,
4
+ auditGovernance,
5
+ checkForbiddenPhrases,
6
+ classifyActorDomain,
7
+ classifyEvents,
8
+ composeSystemPrompt,
9
+ computePersistence,
10
+ createAnthropicAI,
11
+ createMockAI,
12
+ createMockGitHubAdapter,
13
+ emergent,
14
+ extractSignals,
15
+ fetchGitHubActivity,
16
+ formatExocortexForPrompt,
17
+ formatPriorReadsForPrompt,
18
+ formatScope,
19
+ interpretPatterns,
20
+ isPresent,
21
+ isScored,
22
+ isSentinel,
23
+ loadPriorReads,
24
+ parseRepoScope,
25
+ presenceAverage,
26
+ readExocortex,
27
+ render,
28
+ scoreComposite,
29
+ scoreCyber,
30
+ scoreLife,
31
+ scoreNeuroVerse,
32
+ summarizeExocortex,
33
+ think,
34
+ updateKnowledge,
35
+ writeRead
36
+ } from "../chunk-T6EQ7ZBG.js";
37
+ import {
38
+ LENSES,
39
+ aukiBuilderLens,
40
+ getLens,
41
+ listLenses
42
+ } from "../chunk-VGFDMPVB.js";
43
+ import "../chunk-I4RTIMLX.js";
44
+ import "../chunk-ZAF6JH23.js";
45
+ import "../chunk-QLPTHTVB.js";
46
+ import "../chunk-QWGCMQQD.js";
47
+
48
// src/radiant/index.ts
// NOTE(review): this generated constant reports "0.0.0" while package.json says
// 0.7.0 and the MCP server's serverInfo says "0.6.1" — presumably a build-time
// version-injection step did not run; confirm the intended source of truth.
var RADIANT_PACKAGE_VERSION = "0.0.0";
50
+ export {
51
+ DEFAULT_EVIDENCE_GATE,
52
+ DEFAULT_SIGNAL_EXTRACTORS,
53
+ LENSES,
54
+ RADIANT_PACKAGE_VERSION,
55
+ auditGovernance,
56
+ aukiBuilderLens,
57
+ checkForbiddenPhrases,
58
+ classifyActorDomain,
59
+ classifyEvents,
60
+ composeSystemPrompt,
61
+ computePersistence,
62
+ createAnthropicAI,
63
+ createMockAI,
64
+ createMockGitHubAdapter,
65
+ emergent,
66
+ extractSignals,
67
+ fetchGitHubActivity,
68
+ formatExocortexForPrompt,
69
+ formatPriorReadsForPrompt,
70
+ formatScope,
71
+ getLens,
72
+ interpretPatterns,
73
+ isPresent,
74
+ isScored,
75
+ isSentinel,
76
+ listLenses,
77
+ loadPriorReads,
78
+ parseRepoScope,
79
+ presenceAverage,
80
+ readExocortex,
81
+ render,
82
+ scoreComposite,
83
+ scoreCyber,
84
+ scoreLife,
85
+ scoreNeuroVerse,
86
+ summarizeExocortex,
87
+ think,
88
+ updateKnowledge,
89
+ writeRead
90
+ };
@@ -0,0 +1,271 @@
1
+ import {
2
+ createAnthropicAI,
3
+ emergent,
4
+ parseRepoScope,
5
+ think
6
+ } from "./chunk-T6EQ7ZBG.js";
7
+ import "./chunk-VGFDMPVB.js";
8
+ import "./chunk-I4RTIMLX.js";
9
+ import "./chunk-ZAF6JH23.js";
10
+ import "./chunk-QLPTHTVB.js";
11
+ import "./chunk-QWGCMQQD.js";
12
+
13
+ // src/radiant/mcp/server.ts
14
+ import { readFileSync, readdirSync, statSync, existsSync } from "fs";
15
+ import { resolve, join, extname } from "path";
16
// MCP tool manifest, returned verbatim in response to "tools/list".
// Each entry pairs a tool name with the JSON-Schema `inputSchema` that
// clients use to validate arguments before "tools/call".
var TOOLS = [
  {
    name: "radiant_think",
    description: "Send a query through the loaded worldmodel + rendering lens and get an Auki-framed response. Use this for strategic questions, decision evaluation, or any question that should be interpreted through the organization's behavioral model.",
    inputSchema: {
      type: "object",
      properties: {
        query: {
          type: "string",
          description: "The question or prompt to interpret through the worldmodel + lens."
        }
      },
      required: ["query"]
    }
  },
  {
    name: "radiant_emergent",
    description: "Run a behavioral read on a GitHub repository. Fetches recent activity, classifies events, extracts signals, identifies patterns, computes alignment scores, and produces the EMERGENT / MEANING / MOVE / ALIGNMENT output. Use this when asked about team activity, coordination patterns, or alignment with the worldmodel.",
    inputSchema: {
      type: "object",
      properties: {
        scope: {
          type: "string",
          description: 'GitHub repository in "owner/repo" format (e.g. "aukiverse/posemesh").'
        },
        // Optional second argument; handled by handleEmergent as exocortexPath.
        exocortex_dir: {
          type: "string",
          description: "Optional path to an exocortex directory for stated-intent-vs-observed-behavior comparison."
        }
      },
      required: ["scope"]
    }
  }
];
50
// Minimal MCP (Model Context Protocol) server speaking newline-delimited
// JSON-RPC 2.0 over stdio. Requests arrive on stdin, responses are written
// to stdout; all human-readable diagnostics go to stderr so they never
// corrupt the protocol stream.
var RadiantMcpServer = class {
  // { worldsPath, lensId, model } — see startRadiantMcp for construction.
  config;
  // Worldmodel markdown, loaded once in the constructor (throws if missing).
  worldmodelContent;
  // Holds the trailing partial line between stdin "data" events.
  buffer = "";
  constructor(config) {
    this.config = config;
    this.worldmodelContent = loadWorldmodelContent(config.worldsPath);
  }
  // Announces startup on stderr, then pumps stdin until EOF.
  async start() {
    process.stderr.write(
      `Radiant MCP server starting
Worlds: ${this.config.worldsPath}
Lens: ${this.config.lensId}
Tools: radiant_think, radiant_emergent
`
    );
    process.stdin.setEncoding("utf-8");
    process.stdin.on("data", (chunk) => {
      this.buffer += chunk;
      this.processBuffer();
    });
    process.stdin.on("end", () => {
      process.exit(0);
    });
  }
  // Splits the buffer on newlines; the final element is an incomplete line
  // and is kept for the next chunk. Unparseable lines are dropped silently —
  // no request id can be recovered from them, so no error is addressable.
  processBuffer() {
    const lines = this.buffer.split("\n");
    this.buffer = lines.pop() ?? "";
    for (const line of lines) {
      const trimmed = line.trim();
      if (!trimmed) continue;
      try {
        const request = JSON.parse(trimmed);
        this.handleRequest(request).catch((err) => {
          // -32603: JSON-RPC "internal error".
          this.sendError(request.id, -32603, String(err));
        });
      } catch {
      }
    }
  }
  // Routes one JSON-RPC request by method name.
  async handleRequest(req) {
    switch (req.method) {
      case "initialize":
        this.sendResult(req.id, {
          protocolVersion: "2024-11-05",
          capabilities: { tools: {} },
          serverInfo: {
            name: "radiant",
            // NOTE(review): hard-coded "0.6.1" disagrees with package.json
            // (0.7.0) and RADIANT_PACKAGE_VERSION ("0.0.0") — confirm which
            // value should be injected at build time.
            version: "0.6.1"
          }
        });
        break;
      case "notifications/initialized":
        // Notification — no response is expected.
        break;
      case "tools/list":
        this.sendResult(req.id, { tools: TOOLS });
        break;
      case "tools/call":
        await this.handleToolCall(req);
        break;
      default:
        // -32601: JSON-RPC "method not found".
        this.sendError(req.id, -32601, `Unknown method: ${req.method}`);
    }
  }
  // Validates the tool name and dispatches to the matching handler.
  // Handler exceptions become tool results flagged isError (MCP style),
  // not protocol-level JSON-RPC errors.
  async handleToolCall(req) {
    const params = req.params;
    if (!params?.name) {
      // -32602: JSON-RPC "invalid params".
      this.sendError(req.id, -32602, "Missing tool name");
      return;
    }
    const args = params.arguments ?? {};
    try {
      switch (params.name) {
        case "radiant_think":
          await this.handleThink(req.id, args);
          break;
        case "radiant_emergent":
          await this.handleEmergent(req.id, args);
          break;
        default:
          this.sendError(req.id, -32602, `Unknown tool: ${params.name}`);
      }
    } catch (err) {
      this.sendResult(req.id, {
        content: [{ type: "text", text: `Error: ${err}` }],
        isError: true
      });
    }
  }
  // radiant_think: runs the query through think() with the loaded worldmodel
  // and configured lens. Requires ANTHROPIC_API_KEY in the environment.
  async handleThink(id, args) {
    const query = String(args.query ?? "");
    if (!query) {
      this.sendResult(id, {
        content: [{ type: "text", text: "Error: query is required" }],
        isError: true
      });
      return;
    }
    const apiKey = process.env.ANTHROPIC_API_KEY;
    if (!apiKey) {
      this.sendResult(id, {
        content: [{ type: "text", text: "Error: ANTHROPIC_API_KEY not set" }],
        isError: true
      });
      return;
    }
    const ai = createAnthropicAI(apiKey, this.config.model || void 0);
    const result = await think({
      worldmodelContent: this.worldmodelContent,
      lensId: this.config.lensId,
      query,
      ai
    });
    let text = result.response;
    if (!result.voiceClean) {
      // Append a warning listing forbidden phrases that slipped through.
      text += `

\u26A0 Voice violations detected: ${result.voiceViolations.map((v) => v.phrase).join(", ")}`;
    }
    this.sendResult(id, {
      content: [{ type: "text", text }]
    });
  }
  // radiant_emergent: runs a 14-day behavioral read of a GitHub repo via
  // emergent(). Requires both ANTHROPIC_API_KEY and GITHUB_TOKEN.
  async handleEmergent(id, args) {
    const scopeStr = String(args.scope ?? "");
    if (!scopeStr) {
      this.sendResult(id, {
        content: [{ type: "text", text: 'Error: scope is required (e.g. "aukiverse/posemesh")' }],
        isError: true
      });
      return;
    }
    const apiKey = process.env.ANTHROPIC_API_KEY;
    const githubToken = process.env.GITHUB_TOKEN;
    if (!apiKey) {
      this.sendResult(id, {
        content: [{ type: "text", text: "Error: ANTHROPIC_API_KEY not set" }],
        isError: true
      });
      return;
    }
    if (!githubToken) {
      this.sendResult(id, {
        content: [{ type: "text", text: "Error: GITHUB_TOKEN not set" }],
        isError: true
      });
      return;
    }
    const scope = parseRepoScope(scopeStr);
    const ai = createAnthropicAI(apiKey, this.config.model || void 0);
    const exocortexPath = args.exocortex_dir ? String(args.exocortex_dir) : void 0;
    const result = await emergent({
      scope,
      githubToken,
      worldmodelContent: this.worldmodelContent,
      lensId: this.config.lensId,
      ai,
      windowDays: 14,
      exocortexPath
    });
    let text = result.text;
    if (!result.voiceClean) {
      text += `

\u26A0 Voice violations: ${result.voiceViolations.map((v) => v.phrase).join(", ")}`;
    }
    this.sendResult(id, {
      content: [{ type: "text", text }]
    });
  }
  // Writes a JSON-RPC success response as a single line on stdout.
  sendResult(id, result) {
    const response = { jsonrpc: "2.0", id, result };
    process.stdout.write(JSON.stringify(response) + "\n");
  }
  // Writes a JSON-RPC error response; a missing id is normalized to null
  // as required by the JSON-RPC 2.0 spec.
  sendError(id, code, message) {
    const response = {
      jsonrpc: "2.0",
      id: id ?? null,
      error: { code, message }
    };
    process.stdout.write(JSON.stringify(response) + "\n");
  }
};
233
/**
 * Loads worldmodel markdown from `worldsPath`.
 *
 * A file path returns that file's contents. A directory path returns every
 * `*.worldmodel.md` / `*.nv-world.md` file inside it, sorted by name and
 * joined with "\n\n---\n\n". Throws if the path is missing, matches no
 * worldmodel files, or is neither a file nor a directory.
 */
function loadWorldmodelContent(worldsPath) {
  const target = resolve(worldsPath);
  if (!existsSync(target)) {
    throw new Error(`Worlds path not found: ${target}`);
  }
  const info = statSync(target);
  if (info.isFile()) {
    return readFileSync(target, "utf-8");
  }
  if (!info.isDirectory()) {
    throw new Error(`Worlds path is neither a file nor directory: ${target}`);
  }
  // Only recognized worldmodel extensions count; other .md files are skipped.
  const isWorldFile = (name) =>
    extname(name) === ".md" &&
    (name.endsWith(".worldmodel.md") || name.endsWith(".nv-world.md"));
  const matched = readdirSync(target).filter(isWorldFile).sort();
  if (matched.length === 0) {
    throw new Error(`No worldmodel files found in ${target}`);
  }
  const readOne = (name) => readFileSync(join(target, name), "utf-8");
  return matched.map(readOne).join("\n\n---\n\n");
}
253
/**
 * CLI entry point. Reads `--worlds`, `--lens`, `--model` from `args`,
 * falling back to the RADIANT_WORLDS / RADIANT_LENS / RADIANT_MODEL
 * environment variables (lens defaults to "auki-builder"), then builds
 * and starts the MCP server. Exits with code 1 when no worlds path is
 * supplied by either source.
 */
async function startRadiantMcp(args) {
  const flagValue = (flag) => {
    const at = args.indexOf(flag);
    if (at < 0 || at + 1 >= args.length) return void 0;
    return args[at + 1];
  };
  const worldsPath = flagValue("--worlds") ?? process.env.RADIANT_WORLDS;
  const lensId = flagValue("--lens") ?? process.env.RADIANT_LENS ?? "auki-builder";
  const model = flagValue("--model") ?? process.env.RADIANT_MODEL;
  if (!worldsPath) {
    process.stderr.write("Error: --worlds <dir> or RADIANT_WORLDS required.\n");
    process.exit(1);
  }
  const server = new RadiantMcpServer({ worldsPath, lensId, model });
  await server.start();
}
268
+ export {
269
+ RadiantMcpServer,
270
+ startRadiantMcp
271
+ };
@@ -1,19 +1,20 @@
1
1
  ---
2
2
  world_id: behavioral-demo
3
- name: Behavioral Demo
4
- version: 1.0.0
3
+ name: Behavioral Governance Runtime
4
+ version: 2.0.0
5
5
  default_profile: baseline
6
6
  alternative_profile: pressure
7
7
  ---
8
8
 
9
9
  # Thesis
10
10
 
11
- Behavior should be interpreted through repeated action, clarity of ownership, and consistency between stated intent and follow-through. Actions are stronger evidence than promises. Ambiguity is a signal, not noise. Alignment is measured by what people do, not what they say.
11
+ Behavior should be interpreted through repeated action, clarity of ownership, and consistency between stated intent and follow-through. Decisions are governed by observed alignment, not stated confidence. When the system must choose to ship, delay, or escalate, it reads behavior, not promises.
12
12
 
13
13
  # Invariants
14
14
 
15
15
  - `behavior_over_promises` — Repeated action is stronger evidence than stated intent (structural, immutable)
16
16
  - `clarity_matters` — Ambiguity and ownership diffusion are meaningful behavioral signals (structural, immutable)
17
+ - `decisions_follow_alignment` — Decisions must reflect measured alignment, not declared readiness (structural, immutable)
17
18
 
18
19
  # State
19
20
 
@@ -44,6 +45,13 @@ Behavior should be interpreted through repeated action, clarity of ownership, an
44
45
  - label: Alignment Score
45
46
  - description: Measures consistency between stated priorities and actual behavior
46
47
 
48
+ ## decision
49
+ - type: enum
50
+ - options: no_decision, ship_now, delay, escalate
51
+ - default: no_decision
52
+ - label: Decision
53
+ - description: Governed output — what the system recommends based on behavioral alignment
54
+
47
55
  # Assumptions
48
56
 
49
57
  ## baseline
@@ -92,6 +100,39 @@ Then alignment_score *= 1.10
92
100
  > shift: Interpretation becomes more confident.
93
101
  > effect: Alignment score improves.
94
102
 
103
+ ## rule-004: alignment supports shipping (advantage)
104
+ Strong alignment produces a ship decision.
105
+
106
+ When alignment_score >= 75 [state]
107
+ Then decision = "ship_now"
108
+
109
+ > trigger: Behavioral alignment is strong enough to act.
110
+ > rule: When actions match intent consistently, the system recommends execution.
111
+ > shift: Decision moves to ship.
112
+ > effect: Decision set to ship_now.
113
+
114
+ ## rule-005: ambiguity requires delay (degradation)
115
+ Moderate alignment produces a delay decision.
116
+
117
+ When alignment_score < 75 [state] AND alignment_score >= 45 [state]
118
+ Then decision = "delay"
119
+
120
+ > trigger: Alignment is uncertain — behavior and intent are not fully consistent.
121
+ > rule: When signals are mixed, the system recommends waiting for clarity.
122
+ > shift: Decision moves to delay.
123
+ > effect: Decision set to delay.
124
+
125
+ ## rule-006: misalignment triggers escalation (structural)
126
+ Weak alignment produces an escalate decision.
127
+
128
+ When alignment_score < 45 [state]
129
+ Then decision = "escalate"
130
+
131
+ > trigger: Behavioral alignment has degraded below acceptable threshold.
132
+ > rule: When actions consistently contradict stated intent, the system escalates.
133
+ > shift: Decision moves to escalate.
134
+ > effect: Decision set to escalate.
135
+
95
136
  # Gates
96
137
 
97
138
  - STRONG: alignment_score >= 85
@@ -109,6 +150,12 @@ Then alignment_score *= 1.10
109
150
  - label: Alignment Score
110
151
  - primary: true
111
152
 
153
+ ## decision
154
+ - type: enum
155
+ - range: ship_now, delay, escalate, no_decision
156
+ - display: label
157
+ - label: Decision
158
+
112
159
  # Lenses
113
160
  - policy: role_default
114
161
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@neuroverseos/governance",
3
- "version": "0.6.0",
3
+ "version": "0.7.0",
4
4
  "description": "Deterministic governance engine for AI agents — enforce worlds (permanent rules) and plans (mission constraints) with full audit trace",
5
5
  "license": "Apache-2.0",
6
6
  "type": "module",
@@ -97,6 +97,11 @@
97
97
  "types": "./dist/admin/index.d.ts",
98
98
  "import": "./dist/admin/index.js",
99
99
  "require": "./dist/admin/index.cjs"
100
+ },
101
+ "./radiant": {
102
+ "types": "./dist/radiant/index.d.ts",
103
+ "import": "./dist/radiant/index.js",
104
+ "require": "./dist/radiant/index.cjs"
100
105
  }
101
106
  },
102
107
  "bin": {
@@ -119,7 +124,7 @@
119
124
  ],
120
125
  "scripts": {
121
126
  "prepare": "npm run build",
122
- "build": "tsup src/index.ts src/types.ts src/cli/neuroverse.ts src/cli/plan.ts src/cli/run.ts src/cli/worldmodel.ts src/adapters/index.ts src/adapters/langchain.ts src/adapters/openai.ts src/adapters/openclaw.ts src/adapters/express.ts src/adapters/autoresearch.ts src/adapters/deep-agents.ts src/adapters/mentraos.ts src/adapters/github.ts src/engine/guard-engine.ts src/engine/simulate-engine.ts src/engine/bootstrap-parser.ts src/engine/bootstrap-emitter.ts src/engine/worldmodel-parser.ts src/engine/worldmodel-compiler.ts src/spatial/index.ts src/admin/index.ts --format esm,cjs --dts --clean && npm run build:browser && cp src/worlds/*.nv-world.md dist/worlds/ && cp src/worlds/*.worldmodel.md dist/worlds/ && cp src/spatial/zones/*.nv-world.md dist/worlds/ && npm run build:viz",
127
+ "build": "tsup src/index.ts src/types.ts src/cli/neuroverse.ts src/cli/plan.ts src/cli/run.ts src/cli/worldmodel.ts src/cli/radiant.ts src/adapters/index.ts src/adapters/langchain.ts src/adapters/openai.ts src/adapters/openclaw.ts src/adapters/express.ts src/adapters/autoresearch.ts src/adapters/deep-agents.ts src/adapters/mentraos.ts src/adapters/github.ts src/engine/guard-engine.ts src/engine/simulate-engine.ts src/engine/bootstrap-parser.ts src/engine/bootstrap-emitter.ts src/engine/worldmodel-parser.ts src/engine/worldmodel-compiler.ts src/spatial/index.ts src/admin/index.ts src/radiant/index.ts --format esm,cjs --dts --clean && npm run build:browser && cp src/worlds/*.nv-world.md dist/worlds/ && cp src/worlds/*.worldmodel.md dist/worlds/ && cp src/spatial/zones/*.nv-world.md dist/worlds/ && npm run build:viz",
123
128
  "build:browser": "tsup src/browser.ts --format iife --global-name NeuroVerse --outDir dist --no-dts",
124
129
  "build:viz": "vite build",
125
130
  "dev:viz": "vite dev",
@@ -1,9 +1,9 @@
1
- import {
2
- loadConfig
3
- } from "./chunk-OT6PXH54.js";
4
1
  import {
5
2
  createProvider
6
3
  } from "./chunk-INWQHLPS.js";
4
+ import {
5
+ loadConfig
6
+ } from "./chunk-OT6PXH54.js";
7
7
  import {
8
8
  validateWorld
9
9
  } from "./chunk-7P3S7MAY.js";
@@ -1,15 +1,15 @@
1
1
  import {
2
2
  CONFIGURE_AI_EXIT_CODES
3
3
  } from "./chunk-FMSTRBBS.js";
4
+ import {
5
+ createProvider
6
+ } from "./chunk-INWQHLPS.js";
4
7
  import {
5
8
  getConfigPath,
6
9
  loadConfig,
7
10
  redactConfig,
8
11
  saveConfig
9
12
  } from "./chunk-OT6PXH54.js";
10
- import {
11
- createProvider
12
- } from "./chunk-INWQHLPS.js";
13
13
  import "./chunk-QWGCMQQD.js";
14
14
 
15
15
  // src/cli/configure-ai.ts