@neuroverseos/governance 0.6.0 → 0.7.0
This diff reflects the published contents of these package versions as they appear in their public registry and is provided for informational purposes only.
- package/README.md +0 -3
- package/dist/{build-UTVDGHB3.js → build-EGBGZFIJ.js} +2 -2
- package/dist/{chunk-735Z3HA4.js → chunk-FHXXD2TI.js} +3 -3
- package/dist/chunk-T6EQ7ZBG.js +1571 -0
- package/dist/chunk-VGFDMPVB.js +436 -0
- package/dist/cli/neuroverse.cjs +2931 -213
- package/dist/cli/neuroverse.js +41 -90
- package/dist/cli/radiant.cjs +3997 -0
- package/dist/cli/radiant.d.cts +25 -0
- package/dist/cli/radiant.d.ts +25 -0
- package/dist/cli/radiant.js +429 -0
- package/dist/{configure-ai-5MP5DWTT.js → configure-ai-LL3VAPQW.js} +3 -3
- package/dist/{derive-42IJW7JI.js → derive-7Y7YWVLU.js} +2 -2
- package/dist/index.js +28 -28
- package/dist/lenses-K5FVSALR.js +13 -0
- package/dist/radiant/index.cjs +3249 -0
- package/dist/radiant/index.d.cts +1301 -0
- package/dist/radiant/index.d.ts +1301 -0
- package/dist/radiant/index.js +90 -0
- package/dist/server-BXMC5NOE.js +271 -0
- package/dist/worlds/behavioral-demo.nv-world.md +50 -3
- package/package.json +7 -2
package/dist/cli/radiant.d.cts
ADDED
@@ -0,0 +1,25 @@
+/**
+ * CLI: neuroverse radiant <subcommand>
+ *
+ * Radiant — behavioral intelligence for collaboration systems.
+ * Subcommand family for the Radiant module of @neuroverseos/governance.
+ *
+ * Stage A (voice layer):
+ *   neuroverse radiant think --lens <id> --worlds <dir> --query <query>
+ *
+ * Stage B (behavioral analysis, future):
+ *   neuroverse radiant emergent <scope> --lens <id> --worlds <dir>
+ *   neuroverse radiant decision <scope> <ref> --lens <id> --worlds <dir>
+ *   neuroverse radiant signals <scope> --worlds <dir>
+ *   neuroverse radiant drift <scope>
+ *   neuroverse radiant evolve <scope>
+ *   neuroverse radiant lenses list|describe <id>
+ *
+ * Environment:
+ *   ANTHROPIC_API_KEY — required for commands that call the AI
+ *   RADIANT_WORLDS — default worlds directory (overridden by --worlds)
+ *   RADIANT_LENS — default lens id (overridden by --lens)
+ */
+declare function main(argv: string[]): Promise<void>;
+
+export { main };
package/dist/cli/radiant.d.ts
ADDED
@@ -0,0 +1,25 @@
+/**
+ * CLI: neuroverse radiant <subcommand>
+ *
+ * Radiant — behavioral intelligence for collaboration systems.
+ * Subcommand family for the Radiant module of @neuroverseos/governance.
+ *
+ * Stage A (voice layer):
+ *   neuroverse radiant think --lens <id> --worlds <dir> --query <query>
+ *
+ * Stage B (behavioral analysis, future):
+ *   neuroverse radiant emergent <scope> --lens <id> --worlds <dir>
+ *   neuroverse radiant decision <scope> <ref> --lens <id> --worlds <dir>
+ *   neuroverse radiant signals <scope> --worlds <dir>
+ *   neuroverse radiant drift <scope>
+ *   neuroverse radiant evolve <scope>
+ *   neuroverse radiant lenses list|describe <id>
+ *
+ * Environment:
+ *   ANTHROPIC_API_KEY — required for commands that call the AI
+ *   RADIANT_WORLDS — default worlds directory (overridden by --worlds)
+ *   RADIANT_LENS — default lens id (overridden by --lens)
+ */
+declare function main(argv: string[]): Promise<void>;
+
+export { main };
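The two declaration files above are identical and expose a single programmatic entry point, main(argv). A minimal sketch of driving it in-process rather than through the neuroverse bin; the deep-import specifier is an assumption, since this diff does not show the package's exports map:

    // drive-radiant.mjs — call the radiant CLI entry point directly.
    // Assumption: dist/cli/radiant.js is importable via this specifier.
    import { main } from "@neuroverseos/governance/dist/cli/radiant.js";

    // Equivalent to: neuroverse radiant lenses list (no API key needed)
    await main(["lenses", "list"]);

    // Equivalent to the `think` usage in the docblock above.
    // Requires ANTHROPIC_API_KEY; exits with code 2 on voice violations.
    await main([
      "think",
      "--lens", "auki-builder",
      "--worlds", "./worlds",
      "--query", "What is our biggest risk?"
    ]);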
package/dist/cli/radiant.js
ADDED
@@ -0,0 +1,429 @@
+import {
+  createAnthropicAI,
+  emergent,
+  parseRepoScope,
+  readExocortex,
+  summarizeExocortex,
+  think
+} from "../chunk-T6EQ7ZBG.js";
+import {
+  listLenses
+} from "../chunk-VGFDMPVB.js";
+import "../chunk-I4RTIMLX.js";
+import "../chunk-ZAF6JH23.js";
+import "../chunk-QLPTHTVB.js";
+import "../chunk-QWGCMQQD.js";
+
+// src/cli/radiant.ts
+import { readFileSync, readdirSync, statSync, existsSync } from "fs";
+import { resolve, join, extname } from "path";
+var RED = "\x1B[31m";
+var DIM = "\x1B[2m";
+var BOLD = "\x1B[1m";
+var YELLOW = "\x1B[33m";
+var RESET = "\x1B[0m";
+var USAGE = `
+${BOLD}neuroverse radiant${RESET} \u2014 behavioral intelligence for collaboration systems
+
+${BOLD}Stage A (voice layer):${RESET}
+  think       Send a query through the worldmodel + lens \u2192 AI-framed response
+
+${BOLD}Stage B (behavioral analysis, coming soon):${RESET}
+  emergent    Pattern read on recent activity
+  decision    Evaluate a specific artifact against the worldmodel
+  signals     Extract signal matrix (debug)
+  lenses      List or describe available rendering lenses
+
+${BOLD}Usage:${RESET}
+  neuroverse radiant think --lens auki-builder --worlds ./worlds/ --query "What is our biggest risk?"
+  neuroverse radiant think --lens auki-builder --worlds ./worlds/ < prompt.txt
+  neuroverse radiant emergent aukiverse/posemesh --lens auki-builder --worlds ./worlds/
+  neuroverse radiant emergent aukiverse/posemesh --lens auki-builder --worlds ./worlds/ --exocortex ~/exocortex/
+  neuroverse radiant lenses list
+  neuroverse radiant lenses describe auki-builder
+
+${BOLD}Environment:${RESET}
+  ANTHROPIC_API_KEY   Required for AI commands (think, emergent, decision)
+  RADIANT_WORLDS      Default worlds directory (overridden by --worlds)
+  RADIANT_LENS        Default lens id (overridden by --lens)
+  RADIANT_MODEL       AI model override (default: claude-sonnet-4-20250514)
+  RADIANT_EXOCORTEX   Default exocortex directory (overridden by --exocortex)
+`.trim();
+function parseArgs(argv) {
+  const result = {
+    subcommand: void 0,
+    lens: void 0,
+    worlds: void 0,
+    query: void 0,
+    model: void 0,
+    exocortex: void 0,
+    json: false,
+    help: false,
+    rest: []
+  };
+  let i = 0;
+  if (argv.length > 0 && !argv[0].startsWith("-")) {
+    result.subcommand = argv[0];
+    i = 1;
+  }
+  while (i < argv.length) {
+    const arg = argv[i];
+    switch (arg) {
+      case "--lens":
+        result.lens = argv[++i];
+        break;
+      case "--worlds":
+        result.worlds = argv[++i];
+        break;
+      case "--query":
+        result.query = argv[++i];
+        break;
+      case "--model":
+        result.model = argv[++i];
+        break;
+      case "--exocortex":
+        result.exocortex = argv[++i];
+        break;
+      case "--json":
+        result.json = true;
+        break;
+      case "--help":
+      case "-h":
+        result.help = true;
+        break;
+      default:
+        result.rest.push(arg);
+        break;
+    }
+    i++;
+  }
+  return result;
+}
+function loadWorldmodelContent(worldsPath) {
+  const resolved = resolve(worldsPath);
+  if (!existsSync(resolved)) {
+    throw new Error(`Worlds path not found: ${resolved}`);
+  }
+  const stat = statSync(resolved);
+  if (stat.isFile()) {
+    return readFileSync(resolved, "utf-8");
+  }
+  if (stat.isDirectory()) {
+    const files = readdirSync(resolved).filter(
+      (f) => extname(f) === ".md" && (f.endsWith(".worldmodel.md") || f.endsWith(".nv-world.md"))
+    ).sort();
+    if (files.length === 0) {
+      throw new Error(
+        `No .worldmodel.md or .nv-world.md files found in ${resolved}`
+      );
+    }
+    return files.map((f) => {
+      const content = readFileSync(join(resolved, f), "utf-8");
+      return `<!-- worldmodel: ${f} -->
+${content}`;
+    }).join("\n\n---\n\n");
+  }
+  throw new Error(`Worlds path is neither a file nor a directory: ${resolved}`);
+}
+async function cmdThink(args) {
+  const lensId = args.lens ?? process.env.RADIANT_LENS;
+  if (!lensId) {
+    process.stderr.write(
+      `${RED}Error:${RESET} --lens <id> or RADIANT_LENS required.
+${DIM}Available lenses: ${listLenses().join(", ")}${RESET}
+`
+    );
+    process.exit(1);
+  }
+  const worldsPath = args.worlds ?? process.env.RADIANT_WORLDS;
+  if (!worldsPath) {
+    process.stderr.write(
+      `${RED}Error:${RESET} --worlds <dir> or RADIANT_WORLDS required.
+`
+    );
+    process.exit(1);
+  }
+  const apiKey = process.env.ANTHROPIC_API_KEY;
+  if (!apiKey) {
+    process.stderr.write(
+      `${RED}Error:${RESET} ANTHROPIC_API_KEY environment variable not set.
+${DIM}Set it to your Anthropic API key to use Radiant's AI features.${RESET}
+`
+    );
+    process.exit(1);
+  }
+  let query = args.query;
+  if (!query && args.rest.length > 0) {
+    query = args.rest.join(" ");
+  }
+  if (!query && !process.stdin.isTTY) {
+    query = readFileSync(0, "utf-8").trim();
+  }
+  if (!query) {
+    process.stderr.write(
+      `${RED}Error:${RESET} No query provided.
+${DIM}Use --query "...", pass as trailing args, or pipe via stdin.${RESET}
+`
+    );
+    process.exit(1);
+  }
+  const worldmodelContent = loadWorldmodelContent(worldsPath);
+  const model = args.model ?? process.env.RADIANT_MODEL;
+  const ai = createAnthropicAI(apiKey, model || void 0);
+  process.stderr.write(
+    `${DIM}Worlds: ${worldsPath}${RESET}
+${DIM}Lens: ${lensId}${RESET}
+${DIM}Model: ${model ?? "claude-sonnet-4-20250514 (default)"}${RESET}
+
+`
+  );
+  const result = await think({
+    worldmodelContent,
+    lensId,
+    query,
+    ai
+  });
+  if (!result.voiceClean) {
+    process.stderr.write(
+      `${YELLOW}Voice violations detected (${result.voiceViolations.length}):${RESET}
+`
+    );
+    for (const v of result.voiceViolations) {
+      process.stderr.write(
+        `  ${YELLOW}\u26A0${RESET} "${v.phrase}" at offset ${v.offset}
+`
+      );
+    }
+    process.stderr.write("\n");
+  }
+  if (args.json) {
+    process.stdout.write(
+      JSON.stringify(
+        {
+          response: result.response,
+          lens: result.lens,
+          voiceClean: result.voiceClean,
+          voiceViolations: result.voiceViolations
+        },
+        null,
+        2
+      ) + "\n"
+    );
+  } else {
+    process.stdout.write(result.response + "\n");
+  }
+  if (!result.voiceClean) {
+    process.exit(2);
+  }
+}
+async function cmdEmergent(args) {
+  const scopeStr = args.rest[0];
+  if (!scopeStr) {
+    process.stderr.write(
+      `${RED}Error:${RESET} Scope required. Usage: neuroverse radiant emergent <owner/repo>
+`
+    );
+    process.exit(1);
+  }
+  const scope = parseRepoScope(scopeStr);
+  const lensId = args.lens ?? process.env.RADIANT_LENS;
+  if (!lensId) {
+    process.stderr.write(
+      `${RED}Error:${RESET} --lens <id> or RADIANT_LENS required.
+${DIM}Available lenses: ${listLenses().join(", ")}${RESET}
+`
+    );
+    process.exit(1);
+  }
+  const worldsPath = args.worlds ?? process.env.RADIANT_WORLDS;
+  if (!worldsPath) {
+    process.stderr.write(
+      `${RED}Error:${RESET} --worlds <dir> or RADIANT_WORLDS required.
+`
+    );
+    process.exit(1);
+  }
+  const anthropicKey = process.env.ANTHROPIC_API_KEY;
+  if (!anthropicKey) {
+    process.stderr.write(
+      `${RED}Error:${RESET} ANTHROPIC_API_KEY environment variable not set.
+`
+    );
+    process.exit(1);
+  }
+  const githubToken = process.env.GITHUB_TOKEN;
+  if (!githubToken) {
+    process.stderr.write(
+      `${RED}Error:${RESET} GITHUB_TOKEN environment variable not set.
+${DIM}Set it to a GitHub PAT with repo read access.${RESET}
+`
+    );
+    process.exit(1);
+  }
+  const worldmodelContent = loadWorldmodelContent(worldsPath);
+  const model = args.model ?? process.env.RADIANT_MODEL;
+  const ai = createAnthropicAI(anthropicKey, model || void 0);
+  const exocortexPath = args.exocortex ?? process.env.RADIANT_EXOCORTEX;
+  let exocortexStatus = "not loaded";
+  if (exocortexPath) {
+    const ctx = readExocortex(exocortexPath);
+    exocortexStatus = summarizeExocortex(ctx);
+  }
+  process.stderr.write(
+    `${DIM}Scope: ${scope.owner}/${scope.repo}${RESET}
+${DIM}Lens: ${lensId}${RESET}
+${DIM}Model: ${model ?? "claude-sonnet-4-20250514 (default)"}${RESET}
+${DIM}ExoCortex: ${exocortexStatus}${RESET}
+${DIM}Fetching activity...${RESET}
+
+`
+  );
+  const result = await emergent({
+    scope,
+    githubToken,
+    worldmodelContent,
+    lensId,
+    ai,
+    windowDays: 14,
+    exocortexPath: exocortexPath || void 0
+  });
+  if (!result.voiceClean) {
+    process.stderr.write(
+      `${YELLOW}Voice violations (${result.voiceViolations.length}):${RESET}
+`
+    );
+    for (const v of result.voiceViolations) {
+      process.stderr.write(
+        `  ${YELLOW}\u26A0${RESET} "${v.phrase}" at offset ${v.offset}
+`
+      );
+    }
+    process.stderr.write("\n");
+  }
+  if (args.json) {
+    process.stdout.write(
+      JSON.stringify(
+        {
+          text: result.text,
+          frontmatter: result.frontmatter,
+          scores: result.scores,
+          eventCount: result.eventCount,
+          voiceClean: result.voiceClean
+        },
+        null,
+        2
+      ) + "\n"
+    );
+  } else {
+    process.stdout.write(result.text + "\n");
+  }
+}
+async function cmdLenses(args) {
+  const subSub = args.rest[0];
+  if (!subSub || subSub === "list") {
+    const ids = listLenses();
+    if (ids.length === 0) {
+      process.stdout.write("No lenses registered.\n");
+    } else {
+      for (const id of ids) {
+        process.stdout.write(`${id}
+`);
+      }
+    }
+    return;
+  }
+  if (subSub === "describe") {
+    const { getLens } = await import("../lenses-K5FVSALR.js");
+    const id = args.rest[1];
+    if (!id) {
+      process.stderr.write(`${RED}Error:${RESET} Lens id required.
+`);
+      process.exit(1);
+    }
+    const lens = getLens(id);
+    if (!lens) {
+      process.stderr.write(
+        `${RED}Error:${RESET} Lens "${id}" not found.
+${DIM}Available: ${listLenses().join(", ")}${RESET}
+`
+      );
+      process.exit(1);
+    }
+    process.stdout.write(`${BOLD}${lens.name}${RESET}
+`);
+    process.stdout.write(`${lens.description}
+
+`);
+    process.stdout.write(
+      `${BOLD}Domains:${RESET} ${lens.primary_frame.domains.join(", ")}
+`
+    );
+    process.stdout.write(
+      `${BOLD}Overlaps:${RESET} ${lens.primary_frame.overlaps.map((o) => o.emergent_state).join(", ")}
+`
+    );
+    process.stdout.write(
+      `${BOLD}Center:${RESET} ${lens.primary_frame.center_identity}
+`
+    );
+    process.stdout.write(
+      `${BOLD}Forbidden phrases:${RESET} ${lens.forbidden_phrases.length}
+`
+    );
+    process.stdout.write(
+      `${BOLD}Vocabulary terms:${RESET} ${lens.vocabulary.proper_nouns.length} proper nouns, ${Object.keys(lens.vocabulary.preferred).length} substitutions
+`
+    );
+    process.stdout.write(
+      `${BOLD}Exemplars:${RESET} ${lens.exemplar_refs.length}
+`
+    );
+    return;
+  }
+  process.stderr.write(
+    `${RED}Error:${RESET} Unknown lenses subcommand "${subSub}".
+${DIM}Use: lenses list | lenses describe <id>${RESET}
+`
+  );
+  process.exit(1);
+}
+async function main(argv) {
+  const args = parseArgs(argv);
+  if (args.help || !args.subcommand) {
+    process.stdout.write(USAGE + "\n");
+    return;
+  }
+  switch (args.subcommand) {
+    case "think":
+      return cmdThink(args);
+    case "lenses":
+      return cmdLenses(args);
+    case "emergent":
+      return cmdEmergent(args);
+    case "mcp": {
+      const { startRadiantMcp } = await import("../server-BXMC5NOE.js");
+      return startRadiantMcp(argv);
+    }
+    case "decision":
+    case "signals":
+    case "drift":
+    case "evolve":
+      process.stderr.write(
+        `${DIM}neuroverse radiant ${args.subcommand} is not yet implemented.${RESET}
+`
+      );
+      process.exit(1);
+      break;
+    default:
+      process.stderr.write(
+        `${RED}Unknown radiant subcommand: "${args.subcommand}"${RESET}
+
+`
+      );
+      process.stdout.write(USAGE + "\n");
+      process.exit(1);
+  }
+}
+export {
+  main
+};
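For callers consuming the JSON mode above, cmdThink writes a { response, lens, voiceClean, voiceViolations } object to stdout, keeps diagnostics on stderr, and exits with code 2 when voice violations were detected. A sketch of a wrapper honoring that contract, assuming a neuroverse binary on PATH and ANTHROPIC_API_KEY in the environment:

    // consume-think.mjs — read `radiant think --json` output.
    // spawnSync does not throw on a non-zero exit, so the exit-code-2
    // (voice violations) case can still be parsed from stdout.
    import { spawnSync } from "node:child_process";

    const res = spawnSync("neuroverse", [
      "radiant", "think",
      "--lens", "auki-builder",
      "--worlds", "./worlds",
      "--json",
      "--query", "What is our biggest risk?"
    ], { encoding: "utf-8" });

    const { response, voiceClean, voiceViolations } = JSON.parse(res.stdout);
    if (!voiceClean) {
      console.warn(`voice violations: ${voiceViolations.length}`);
    }
    console.log(response);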
package/dist/derive-7Y7YWVLU.js
CHANGED
@@ -2,12 +2,12 @@ import {
   DeriveInputError,
   DeriveProviderError,
   deriveWorld
-} from "./chunk-735Z3HA4.js";
+} from "./chunk-FHXXD2TI.js";
 import {
   DERIVE_EXIT_CODES
 } from "./chunk-FMSTRBBS.js";
-import "./chunk-OT6PXH54.js";
 import "./chunk-INWQHLPS.js";
+import "./chunk-OT6PXH54.js";
 import "./chunk-7P3S7MAY.js";
 import "./chunk-YPCVY4GS.js";
 import "./chunk-3NZMMSOW.js";
package/dist/index.js
CHANGED
@@ -1,3 +1,17 @@
+import {
+  generateImpactReport,
+  generateImpactReportFromFile,
+  renderImpactReport
+} from "./chunk-OQU65525.js";
+import {
+  VALIDATE_EXIT_CODES
+} from "./chunk-I3RRAYK2.js";
+import {
+  GUARD_EXIT_CODES,
+  classifyIntentWithAI,
+  evaluateGuardWithAI,
+  extractContentFields
+} from "./chunk-6CV4XG3J.js";
 import {
   actionToGuardEvent,
   createGovernor,
@@ -15,10 +29,20 @@ import {
   generateAdaptationNarrative
 } from "./chunk-CNSO6XW5.js";
 import {
-
-
-
-} from "./chunk-
+  deriveWorld,
+  extractWorldMarkdown,
+  normalizeWorldMarkdown
+} from "./chunk-FHXXD2TI.js";
+import {
+  CONFIGURE_AI_EXIT_CODES,
+  DERIVE_EXIT_CODES
+} from "./chunk-FMSTRBBS.js";
+import "./chunk-INWQHLPS.js";
+import "./chunk-OT6PXH54.js";
+import {
+  explainWorld,
+  renderExplainText
+} from "./chunk-ZJTDUCC2.js";
 import {
   improveWorld,
   renderImproveText
@@ -26,15 +50,6 @@ import {
 import {
   BOOTSTRAP_EXIT_CODES
 } from "./chunk-4NGDRRQH.js";
-import {
-  VALIDATE_EXIT_CODES
-} from "./chunk-I3RRAYK2.js";
-import {
-  GUARD_EXIT_CODES,
-  classifyIntentWithAI,
-  evaluateGuardWithAI,
-  extractContentFields
-} from "./chunk-6CV4XG3J.js";
 import {
   SessionManager,
   runInteractiveMode,
@@ -63,24 +78,9 @@ import {
   classifyIntent,
   parseGuardDescription
 } from "./chunk-FS2UUJJO.js";
-import {
-  deriveWorld,
-  extractWorldMarkdown,
-  normalizeWorldMarkdown
-} from "./chunk-735Z3HA4.js";
-import {
-  CONFIGURE_AI_EXIT_CODES,
-  DERIVE_EXIT_CODES
-} from "./chunk-FMSTRBBS.js";
-import "./chunk-OT6PXH54.js";
-import "./chunk-INWQHLPS.js";
 import {
   validateWorld
 } from "./chunk-7P3S7MAY.js";
-import {
-  explainWorld,
-  renderExplainText
-} from "./chunk-ZJTDUCC2.js";
 import {
   emitWorldDefinition
 } from "./chunk-YPCVY4GS.js";