@neuroverseos/governance 0.6.0 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{build-UTVDGHB3.js → build-EGBGZFIJ.js} +2 -2
- package/dist/chunk-AEVT7DSZ.js +1208 -0
- package/dist/chunk-VGFDMPVB.js +436 -0
- package/dist/cli/neuroverse.cjs +2288 -211
- package/dist/cli/neuroverse.js +41 -90
- package/dist/cli/radiant.cjs +2058 -0
- package/dist/cli/radiant.d.cts +25 -0
- package/dist/cli/radiant.d.ts +25 -0
- package/dist/cli/radiant.js +422 -0
- package/dist/{derive-42IJW7JI.js → derive-7Y7YWVLU.js} +2 -2
- package/dist/index.js +28 -28
- package/dist/lenses-K5FVSALR.js +13 -0
- package/dist/radiant/index.cjs +1700 -0
- package/dist/radiant/index.d.cts +1163 -0
- package/dist/radiant/index.d.ts +1163 -0
- package/dist/radiant/index.js +75 -0
- package/dist/worlds/behavioral-demo.nv-world.md +50 -3
- package/package.json +7 -2
- package/dist/{chunk-735Z3HA4.js → chunk-FHXXD2TI.js} +3 -3
- package/dist/{configure-ai-5MP5DWTT.js → configure-ai-LL3VAPQW.js} +3 -3
package/dist/cli/neuroverse.cjs
CHANGED
|
@@ -2002,10 +2002,10 @@ __export(world_loader_exports, {
|
|
|
2002
2002
|
});
|
|
2003
2003
|
async function loadWorldFromDirectory(dirPath) {
|
|
2004
2004
|
const { readFile: readFile4 } = await import("fs/promises");
|
|
2005
|
-
const { join:
|
|
2006
|
-
const { readdirSync:
|
|
2005
|
+
const { join: join16 } = await import("path");
|
|
2006
|
+
const { readdirSync: readdirSync7 } = await import("fs");
|
|
2007
2007
|
async function readJson(filename) {
|
|
2008
|
-
const filePath =
|
|
2008
|
+
const filePath = join16(dirPath, filename);
|
|
2009
2009
|
try {
|
|
2010
2010
|
const content = await readFile4(filePath, "utf-8");
|
|
2011
2011
|
return JSON.parse(content);
|
|
@@ -2035,11 +2035,11 @@ async function loadWorldFromDirectory(dirPath) {
|
|
|
2035
2035
|
const metadataJson = await readJson("metadata.json");
|
|
2036
2036
|
const rules = [];
|
|
2037
2037
|
try {
|
|
2038
|
-
const rulesDir =
|
|
2039
|
-
const ruleFiles =
|
|
2038
|
+
const rulesDir = join16(dirPath, "rules");
|
|
2039
|
+
const ruleFiles = readdirSync7(rulesDir).filter((f) => f.endsWith(".json")).sort();
|
|
2040
2040
|
for (const file of ruleFiles) {
|
|
2041
2041
|
try {
|
|
2042
|
-
const content = await readFile4(
|
|
2042
|
+
const content = await readFile4(join16(rulesDir, file), "utf-8");
|
|
2043
2043
|
rules.push(JSON.parse(content));
|
|
2044
2044
|
} catch (err) {
|
|
2045
2045
|
process.stderr.write(
|
|
@@ -2093,8 +2093,8 @@ async function loadWorld(worldPath) {
|
|
|
2093
2093
|
}
|
|
2094
2094
|
async function loadBundledWorld(name = DEFAULT_BUNDLED_WORLD) {
|
|
2095
2095
|
const { readFile: readFile4 } = await import("fs/promises");
|
|
2096
|
-
const { join:
|
|
2097
|
-
const { existsSync:
|
|
2096
|
+
const { join: join16, dirname: dirname4 } = await import("path");
|
|
2097
|
+
const { existsSync: existsSync12 } = await import("fs");
|
|
2098
2098
|
const { fileURLToPath: fileURLToPath3 } = await import("url");
|
|
2099
2099
|
const { parseWorldMarkdown: parseWorldMarkdown2 } = await Promise.resolve().then(() => (init_bootstrap_parser(), bootstrap_parser_exports));
|
|
2100
2100
|
const { emitWorldDefinition: emitWorldDefinition2 } = await Promise.resolve().then(() => (init_bootstrap_emitter(), bootstrap_emitter_exports));
|
|
@@ -2102,16 +2102,16 @@ async function loadBundledWorld(name = DEFAULT_BUNDLED_WORLD) {
|
|
|
2102
2102
|
let packageRoot;
|
|
2103
2103
|
try {
|
|
2104
2104
|
const thisFile = typeof __dirname !== "undefined" ? __dirname : dirname4(fileURLToPath3(import_meta.url));
|
|
2105
|
-
packageRoot =
|
|
2105
|
+
packageRoot = join16(thisFile, "..", "..");
|
|
2106
2106
|
} catch {
|
|
2107
2107
|
packageRoot = process.cwd();
|
|
2108
2108
|
}
|
|
2109
2109
|
const candidates = [
|
|
2110
|
-
|
|
2111
|
-
|
|
2110
|
+
join16(packageRoot, "dist", "worlds", filename),
|
|
2111
|
+
join16(packageRoot, "src", "worlds", filename)
|
|
2112
2112
|
];
|
|
2113
2113
|
for (const candidate of candidates) {
|
|
2114
|
-
if (
|
|
2114
|
+
if (existsSync12(candidate)) {
|
|
2115
2115
|
const markdown = await readFile4(candidate, "utf-8");
|
|
2116
2116
|
const parsed = parseWorldMarkdown2(markdown);
|
|
2117
2117
|
if (!parsed.world) {
|
|
@@ -2141,8 +2141,8 @@ function slugify(text) {
|
|
|
2141
2141
|
}
|
|
2142
2142
|
async function addGuard(worldDir, input) {
|
|
2143
2143
|
const { readFile: readFile4, writeFile: writeFile6 } = await import("fs/promises");
|
|
2144
|
-
const { join:
|
|
2145
|
-
const guardsPath =
|
|
2144
|
+
const { join: join16 } = await import("path");
|
|
2145
|
+
const guardsPath = join16(worldDir, "guards.json");
|
|
2146
2146
|
let config;
|
|
2147
2147
|
try {
|
|
2148
2148
|
const raw = await readFile4(guardsPath, "utf-8");
|
|
@@ -2182,13 +2182,13 @@ async function addGuard(worldDir, input) {
|
|
|
2182
2182
|
}
|
|
2183
2183
|
async function addRule(worldDir, input) {
|
|
2184
2184
|
const { readFile: readFile4, writeFile: writeFile6, mkdir: mkdir3 } = await import("fs/promises");
|
|
2185
|
-
const { join:
|
|
2186
|
-
const { readdirSync:
|
|
2187
|
-
const rulesDir =
|
|
2185
|
+
const { join: join16 } = await import("path");
|
|
2186
|
+
const { readdirSync: readdirSync7 } = await import("fs");
|
|
2187
|
+
const rulesDir = join16(worldDir, "rules");
|
|
2188
2188
|
await mkdir3(rulesDir, { recursive: true });
|
|
2189
2189
|
let nextNum = 1;
|
|
2190
2190
|
try {
|
|
2191
|
-
const existing =
|
|
2191
|
+
const existing = readdirSync7(rulesDir).filter((f) => f.match(/^rule-\d+\.json$/)).sort();
|
|
2192
2192
|
if (existing.length > 0) {
|
|
2193
2193
|
const lastFile = existing[existing.length - 1];
|
|
2194
2194
|
const match = lastFile.match(/rule-(\d+)\.json/);
|
|
@@ -2214,7 +2214,7 @@ async function addRule(worldDir, input) {
|
|
|
2214
2214
|
effect_text: input.effects ? input.effects.map((e) => `${e.target} ${e.operation} ${e.value}`).join(", ") : "No direct effects"
|
|
2215
2215
|
}
|
|
2216
2216
|
};
|
|
2217
|
-
const rulePath =
|
|
2217
|
+
const rulePath = join16(rulesDir, `rule-${ruleNum}.json`);
|
|
2218
2218
|
await writeFile6(rulePath, JSON.stringify(rule, null, 2) + "\n");
|
|
2219
2219
|
const { loadWorldFromDirectory: loadWorldFromDirectory3 } = await Promise.resolve().then(() => (init_world_loader(), world_loader_exports));
|
|
2220
2220
|
const world = await loadWorldFromDirectory3(worldDir);
|
|
@@ -2230,8 +2230,8 @@ async function addRule(worldDir, input) {
|
|
|
2230
2230
|
}
|
|
2231
2231
|
async function addInvariant(worldDir, input) {
|
|
2232
2232
|
const { readFile: readFile4, writeFile: writeFile6 } = await import("fs/promises");
|
|
2233
|
-
const { join:
|
|
2234
|
-
const invariantsPath =
|
|
2233
|
+
const { join: join16 } = await import("path");
|
|
2234
|
+
const invariantsPath = join16(worldDir, "invariants.json");
|
|
2235
2235
|
let config;
|
|
2236
2236
|
try {
|
|
2237
2237
|
const raw = await readFile4(invariantsPath, "utf-8");
|
|
@@ -2849,7 +2849,7 @@ ${candidates.map((c) => ` - ${c}`).join("\n")}`
|
|
|
2849
2849
|
}
|
|
2850
2850
|
async function collectMarkdownSources(inputPath) {
|
|
2851
2851
|
const { stat, readFile: rf, readdir } = await import("fs/promises");
|
|
2852
|
-
const { join: pathJoin, extname:
|
|
2852
|
+
const { join: pathJoin, extname: extname3, basename: basename4 } = await import("path");
|
|
2853
2853
|
const stats = await stat(inputPath);
|
|
2854
2854
|
if (stats.isFile()) {
|
|
2855
2855
|
const content = await rf(inputPath, "utf-8");
|
|
@@ -2857,20 +2857,20 @@ async function collectMarkdownSources(inputPath) {
|
|
|
2857
2857
|
}
|
|
2858
2858
|
if (stats.isDirectory()) {
|
|
2859
2859
|
const sources = [];
|
|
2860
|
-
await collectDir(inputPath, sources, rf, pathJoin,
|
|
2860
|
+
await collectDir(inputPath, sources, rf, pathJoin, extname3, basename4);
|
|
2861
2861
|
sources.sort((a, b) => a.filename.localeCompare(b.filename));
|
|
2862
2862
|
return sources;
|
|
2863
2863
|
}
|
|
2864
2864
|
throw new Error(`Input path is neither a file nor a directory: ${inputPath}`);
|
|
2865
2865
|
}
|
|
2866
|
-
async function collectDir(dir, sources, rf, pathJoin,
|
|
2866
|
+
async function collectDir(dir, sources, rf, pathJoin, extname3, basename4) {
|
|
2867
2867
|
const { readdir } = await import("fs/promises");
|
|
2868
2868
|
const entries = await readdir(dir, { withFileTypes: true });
|
|
2869
2869
|
for (const entry of entries) {
|
|
2870
2870
|
const fullPath = pathJoin(dir, entry.name);
|
|
2871
2871
|
if (entry.isDirectory()) {
|
|
2872
|
-
await collectDir(fullPath, sources, rf, pathJoin,
|
|
2873
|
-
} else if (entry.isFile() &&
|
|
2872
|
+
await collectDir(fullPath, sources, rf, pathJoin, extname3, basename4);
|
|
2873
|
+
} else if (entry.isFile() && extname3(entry.name).toLowerCase() === ".md") {
|
|
2874
2874
|
const content = await rf(fullPath, "utf-8");
|
|
2875
2875
|
sources.push({ filename: entry.name, content });
|
|
2876
2876
|
}
|
|
@@ -3528,21 +3528,21 @@ function parseArgs2(argv) {
|
|
|
3528
3528
|
}
|
|
3529
3529
|
async function writeWorldFiles(outputDir, world) {
|
|
3530
3530
|
const { writeFile: writeFile6, mkdir: mkdir3 } = await import("fs/promises");
|
|
3531
|
-
const { join:
|
|
3531
|
+
const { join: join16 } = await import("path");
|
|
3532
3532
|
await mkdir3(outputDir, { recursive: true });
|
|
3533
|
-
await writeFile6(
|
|
3534
|
-
await writeFile6(
|
|
3535
|
-
await writeFile6(
|
|
3536
|
-
await writeFile6(
|
|
3537
|
-
await writeFile6(
|
|
3538
|
-
await writeFile6(
|
|
3539
|
-
await writeFile6(
|
|
3540
|
-
const rulesDir =
|
|
3533
|
+
await writeFile6(join16(outputDir, "world.json"), JSON.stringify(world.world, null, 2));
|
|
3534
|
+
await writeFile6(join16(outputDir, "invariants.json"), JSON.stringify({ invariants: world.invariants }, null, 2));
|
|
3535
|
+
await writeFile6(join16(outputDir, "assumptions.json"), JSON.stringify(world.assumptions, null, 2));
|
|
3536
|
+
await writeFile6(join16(outputDir, "state-schema.json"), JSON.stringify(world.stateSchema, null, 2));
|
|
3537
|
+
await writeFile6(join16(outputDir, "gates.json"), JSON.stringify(world.gates, null, 2));
|
|
3538
|
+
await writeFile6(join16(outputDir, "outcomes.json"), JSON.stringify(world.outcomes, null, 2));
|
|
3539
|
+
await writeFile6(join16(outputDir, "metadata.json"), JSON.stringify(world.metadata, null, 2));
|
|
3540
|
+
const rulesDir = join16(outputDir, "rules");
|
|
3541
3541
|
await mkdir3(rulesDir, { recursive: true });
|
|
3542
3542
|
const sortedRules = [...world.rules].sort((a, b) => a.order - b.order);
|
|
3543
3543
|
for (let i = 0; i < sortedRules.length; i++) {
|
|
3544
3544
|
const ruleNum = String(i + 1).padStart(3, "0");
|
|
3545
|
-
await writeFile6(
|
|
3545
|
+
await writeFile6(join16(rulesDir, `rule-${ruleNum}.json`), JSON.stringify(sortedRules[i], null, 2));
|
|
3546
3546
|
}
|
|
3547
3547
|
}
|
|
3548
3548
|
function write(msg) {
|
|
@@ -3673,9 +3673,9 @@ World source written to: ${derivedPath}
|
|
|
3673
3673
|
try {
|
|
3674
3674
|
const emitResult = emitWorldDefinition(parseResult.world);
|
|
3675
3675
|
await writeWorldFiles(outputDir, emitResult.world);
|
|
3676
|
-
const { join:
|
|
3676
|
+
const { join: join16 } = await import("path");
|
|
3677
3677
|
const { copyFile } = await import("fs/promises");
|
|
3678
|
-
const sourceDest =
|
|
3678
|
+
const sourceDest = join16(outputDir, "source.nv-world.md");
|
|
3679
3679
|
if (derivedPath !== sourceDest) {
|
|
3680
3680
|
await copyFile(derivedPath, sourceDest);
|
|
3681
3681
|
}
|
|
@@ -5009,8 +5009,8 @@ async function main6(argv = process.argv.slice(2)) {
|
|
|
5009
5009
|
try {
|
|
5010
5010
|
const args = parseArgs6(argv);
|
|
5011
5011
|
const { writeFile: writeFile6 } = await import("fs/promises");
|
|
5012
|
-
const { existsSync:
|
|
5013
|
-
if (
|
|
5012
|
+
const { existsSync: existsSync12 } = await import("fs");
|
|
5013
|
+
if (existsSync12(args.outputPath)) {
|
|
5014
5014
|
process.stderr.write(`File already exists: ${args.outputPath}
|
|
5015
5015
|
`);
|
|
5016
5016
|
process.stderr.write("Use a different --output path or remove the existing file.\n");
|
|
@@ -5868,22 +5868,22 @@ function parseArgs9(argv) {
|
|
|
5868
5868
|
}
|
|
5869
5869
|
async function writeWorldFiles2(outputDir, world) {
|
|
5870
5870
|
const { writeFile: writeFile6, mkdir: mkdir3 } = await import("fs/promises");
|
|
5871
|
-
const { join:
|
|
5871
|
+
const { join: join16 } = await import("path");
|
|
5872
5872
|
await mkdir3(outputDir, { recursive: true });
|
|
5873
|
-
await writeFile6(
|
|
5874
|
-
await writeFile6(
|
|
5875
|
-
await writeFile6(
|
|
5876
|
-
await writeFile6(
|
|
5877
|
-
const rulesDir =
|
|
5873
|
+
await writeFile6(join16(outputDir, "world.json"), JSON.stringify(world.world, null, 2));
|
|
5874
|
+
await writeFile6(join16(outputDir, "invariants.json"), JSON.stringify({ invariants: world.invariants }, null, 2));
|
|
5875
|
+
await writeFile6(join16(outputDir, "assumptions.json"), JSON.stringify(world.assumptions, null, 2));
|
|
5876
|
+
await writeFile6(join16(outputDir, "state-schema.json"), JSON.stringify(world.stateSchema, null, 2));
|
|
5877
|
+
const rulesDir = join16(outputDir, "rules");
|
|
5878
5878
|
await mkdir3(rulesDir, { recursive: true });
|
|
5879
5879
|
const sortedRules = [...world.rules].sort((a, b) => a.order - b.order);
|
|
5880
5880
|
for (let i = 0; i < sortedRules.length; i++) {
|
|
5881
5881
|
const ruleNum = String(i + 1).padStart(3, "0");
|
|
5882
|
-
await writeFile6(
|
|
5882
|
+
await writeFile6(join16(rulesDir, `rule-${ruleNum}.json`), JSON.stringify(sortedRules[i], null, 2));
|
|
5883
5883
|
}
|
|
5884
|
-
await writeFile6(
|
|
5885
|
-
await writeFile6(
|
|
5886
|
-
await writeFile6(
|
|
5884
|
+
await writeFile6(join16(outputDir, "gates.json"), JSON.stringify(world.gates, null, 2));
|
|
5885
|
+
await writeFile6(join16(outputDir, "outcomes.json"), JSON.stringify(world.outcomes, null, 2));
|
|
5886
|
+
await writeFile6(join16(outputDir, "metadata.json"), JSON.stringify(world.metadata, null, 2));
|
|
5887
5887
|
}
|
|
5888
5888
|
async function main9(argv = process.argv.slice(2)) {
|
|
5889
5889
|
const startTime = performance.now();
|
|
@@ -8242,13 +8242,13 @@ function handleHealthCheck() {
|
|
|
8242
8242
|
}
|
|
8243
8243
|
async function handleListPresets(policiesDir) {
|
|
8244
8244
|
const { readdir, readFile: readFile4 } = await import("fs/promises");
|
|
8245
|
-
const { join:
|
|
8246
|
-
const dir = policiesDir ??
|
|
8245
|
+
const { join: join16 } = await import("path");
|
|
8246
|
+
const dir = policiesDir ?? join16(process.cwd(), "policies");
|
|
8247
8247
|
const presets = [];
|
|
8248
8248
|
try {
|
|
8249
8249
|
const files = await readdir(dir);
|
|
8250
8250
|
for (const file of files.filter((f) => f.endsWith(".txt")).sort()) {
|
|
8251
|
-
const content = await readFile4(
|
|
8251
|
+
const content = await readFile4(join16(dir, file), "utf-8");
|
|
8252
8252
|
const id = file.replace(".txt", "");
|
|
8253
8253
|
const name = id.split("-").map((w) => w.charAt(0).toUpperCase() + w.slice(1)).join(" ");
|
|
8254
8254
|
const firstLine = content.split("\n").find((l) => l.trim().length > 0) ?? "";
|
|
@@ -8320,7 +8320,7 @@ function govern(action, world, options) {
|
|
|
8320
8320
|
}
|
|
8321
8321
|
async function writeTempWorld(dir, policyLines) {
|
|
8322
8322
|
const { writeFile: writeFile6, mkdir: mkdir3 } = await import("fs/promises");
|
|
8323
|
-
const { join:
|
|
8323
|
+
const { join: join16 } = await import("path");
|
|
8324
8324
|
await mkdir3(dir, { recursive: true });
|
|
8325
8325
|
const worldJson = {
|
|
8326
8326
|
world_id: "demo-live",
|
|
@@ -8362,9 +8362,9 @@ async function writeTempWorld(dir, policyLines) {
|
|
|
8362
8362
|
authoring_method: "manual-authoring"
|
|
8363
8363
|
};
|
|
8364
8364
|
await Promise.all([
|
|
8365
|
-
writeFile6(
|
|
8366
|
-
writeFile6(
|
|
8367
|
-
writeFile6(
|
|
8365
|
+
writeFile6(join16(dir, "world.json"), JSON.stringify(worldJson, null, 2)),
|
|
8366
|
+
writeFile6(join16(dir, "kernel.json"), JSON.stringify(kernelJson, null, 2)),
|
|
8367
|
+
writeFile6(join16(dir, "metadata.json"), JSON.stringify(metadataJson, null, 2))
|
|
8368
8368
|
]);
|
|
8369
8369
|
}
|
|
8370
8370
|
var init_govern = __esm({
|
|
@@ -8708,10 +8708,10 @@ data: ${data}
|
|
|
8708
8708
|
res.setHeader("Access-Control-Allow-Headers", "Content-Type, Authorization");
|
|
8709
8709
|
}
|
|
8710
8710
|
function readBody(req) {
|
|
8711
|
-
return new Promise((
|
|
8711
|
+
return new Promise((resolve8, reject) => {
|
|
8712
8712
|
const chunks = [];
|
|
8713
8713
|
req.on("data", (chunk) => chunks.push(chunk));
|
|
8714
|
-
req.on("end", () =>
|
|
8714
|
+
req.on("end", () => resolve8(Buffer.concat(chunks).toString("utf-8")));
|
|
8715
8715
|
req.on("error", reject);
|
|
8716
8716
|
});
|
|
8717
8717
|
}
|
|
@@ -11475,9 +11475,9 @@ async function main15(argv) {
|
|
|
11475
11475
|
}
|
|
11476
11476
|
if (worldPath) {
|
|
11477
11477
|
try {
|
|
11478
|
-
const { existsSync:
|
|
11479
|
-
const { join:
|
|
11480
|
-
const hasWorld =
|
|
11478
|
+
const { existsSync: existsSync12 } = await import("fs");
|
|
11479
|
+
const { join: join16 } = await import("path");
|
|
11480
|
+
const hasWorld = existsSync12(join16(worldPath, "world.json"));
|
|
11481
11481
|
checks.push({
|
|
11482
11482
|
label: "World file detected",
|
|
11483
11483
|
status: hasWorld ? "pass" : "fail",
|
|
@@ -11492,12 +11492,12 @@ async function main15(argv) {
|
|
|
11492
11492
|
});
|
|
11493
11493
|
}
|
|
11494
11494
|
} else {
|
|
11495
|
-
const { existsSync:
|
|
11496
|
-
const { join:
|
|
11495
|
+
const { existsSync: existsSync12 } = await import("fs");
|
|
11496
|
+
const { join: join16 } = await import("path");
|
|
11497
11497
|
const candidates = ["./world", "./.neuroverse", "./worlds"];
|
|
11498
11498
|
let found;
|
|
11499
11499
|
for (const dir of candidates) {
|
|
11500
|
-
if (
|
|
11500
|
+
if (existsSync12(join16(dir, "world.json"))) {
|
|
11501
11501
|
found = dir;
|
|
11502
11502
|
break;
|
|
11503
11503
|
}
|
|
@@ -13148,7 +13148,7 @@ async function runPipeMode(config) {
|
|
|
13148
13148
|
`);
|
|
13149
13149
|
}
|
|
13150
13150
|
const MAX_BUFFER_SIZE = 1e6;
|
|
13151
|
-
return new Promise((
|
|
13151
|
+
return new Promise((resolve8, reject) => {
|
|
13152
13152
|
let buffer = "";
|
|
13153
13153
|
process.stdin.setEncoding("utf-8");
|
|
13154
13154
|
process.stdin.on("data", (chunk) => {
|
|
@@ -13194,7 +13194,7 @@ async function runPipeMode(config) {
|
|
|
13194
13194
|
`[neuroverse] Session complete: ${finalState.actionsEvaluated} evaluated, ${finalState.actionsAllowed} allowed, ${finalState.actionsBlocked} blocked, ${finalState.actionsPaused} paused
|
|
13195
13195
|
`
|
|
13196
13196
|
);
|
|
13197
|
-
|
|
13197
|
+
resolve8();
|
|
13198
13198
|
});
|
|
13199
13199
|
process.stdin.on("error", reject);
|
|
13200
13200
|
});
|
|
@@ -13310,8 +13310,8 @@ ${response.content}
|
|
|
13310
13310
|
rl2.on("close", () => {
|
|
13311
13311
|
session.stop();
|
|
13312
13312
|
});
|
|
13313
|
-
return new Promise((
|
|
13314
|
-
rl2.on("close",
|
|
13313
|
+
return new Promise((resolve8) => {
|
|
13314
|
+
rl2.on("close", resolve8);
|
|
13315
13315
|
});
|
|
13316
13316
|
}
|
|
13317
13317
|
var SessionManager;
|
|
@@ -14422,27 +14422,27 @@ function computeWorldDiff(a, b) {
|
|
|
14422
14422
|
}
|
|
14423
14423
|
async function worldSnapshot(worldPath) {
|
|
14424
14424
|
const { readdir, readFile: readFile4, mkdir: mkdir3, writeFile: writeFile6 } = await import("fs/promises");
|
|
14425
|
-
const { join:
|
|
14425
|
+
const { join: join16 } = await import("path");
|
|
14426
14426
|
const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
|
|
14427
|
-
const snapshotDir =
|
|
14427
|
+
const snapshotDir = join16(worldPath, ".snapshots", timestamp);
|
|
14428
14428
|
await mkdir3(snapshotDir, { recursive: true });
|
|
14429
14429
|
const files = await readdir(worldPath);
|
|
14430
14430
|
let copied = 0;
|
|
14431
14431
|
for (const file of files) {
|
|
14432
14432
|
if (file.endsWith(".json")) {
|
|
14433
|
-
const content = await readFile4(
|
|
14434
|
-
await writeFile6(
|
|
14433
|
+
const content = await readFile4(join16(worldPath, file), "utf-8");
|
|
14434
|
+
await writeFile6(join16(snapshotDir, file), content, "utf-8");
|
|
14435
14435
|
copied++;
|
|
14436
14436
|
}
|
|
14437
14437
|
}
|
|
14438
14438
|
try {
|
|
14439
|
-
const rulesDir =
|
|
14439
|
+
const rulesDir = join16(worldPath, "rules");
|
|
14440
14440
|
const ruleFiles = await readdir(rulesDir);
|
|
14441
|
-
await mkdir3(
|
|
14441
|
+
await mkdir3(join16(snapshotDir, "rules"), { recursive: true });
|
|
14442
14442
|
for (const file of ruleFiles) {
|
|
14443
14443
|
if (file.endsWith(".json")) {
|
|
14444
|
-
const content = await readFile4(
|
|
14445
|
-
await writeFile6(
|
|
14444
|
+
const content = await readFile4(join16(rulesDir, file), "utf-8");
|
|
14445
|
+
await writeFile6(join16(snapshotDir, "rules", file), content, "utf-8");
|
|
14446
14446
|
copied++;
|
|
14447
14447
|
}
|
|
14448
14448
|
}
|
|
@@ -14455,8 +14455,8 @@ async function worldSnapshot(worldPath) {
|
|
|
14455
14455
|
}
|
|
14456
14456
|
async function worldRollback(worldPath) {
|
|
14457
14457
|
const { readdir, readFile: readFile4, writeFile: writeFile6, mkdir: mkdir3 } = await import("fs/promises");
|
|
14458
|
-
const { join:
|
|
14459
|
-
const snapshotsDir =
|
|
14458
|
+
const { join: join16 } = await import("path");
|
|
14459
|
+
const snapshotsDir = join16(worldPath, ".snapshots");
|
|
14460
14460
|
let snapshots;
|
|
14461
14461
|
try {
|
|
14462
14462
|
snapshots = (await readdir(snapshotsDir)).sort();
|
|
@@ -14471,34 +14471,34 @@ async function worldRollback(worldPath) {
|
|
|
14471
14471
|
return;
|
|
14472
14472
|
}
|
|
14473
14473
|
const latest = snapshots[snapshots.length - 1];
|
|
14474
|
-
const snapshotDir =
|
|
14474
|
+
const snapshotDir = join16(snapshotsDir, latest);
|
|
14475
14475
|
const backupTimestamp = "pre-rollback-" + (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
|
|
14476
|
-
const backupDir =
|
|
14476
|
+
const backupDir = join16(snapshotsDir, backupTimestamp);
|
|
14477
14477
|
await mkdir3(backupDir, { recursive: true });
|
|
14478
14478
|
const currentFiles = await readdir(worldPath);
|
|
14479
14479
|
for (const file of currentFiles) {
|
|
14480
14480
|
if (file.endsWith(".json")) {
|
|
14481
|
-
const content = await readFile4(
|
|
14482
|
-
await writeFile6(
|
|
14481
|
+
const content = await readFile4(join16(worldPath, file), "utf-8");
|
|
14482
|
+
await writeFile6(join16(backupDir, file), content, "utf-8");
|
|
14483
14483
|
}
|
|
14484
14484
|
}
|
|
14485
14485
|
const snapshotFiles = await readdir(snapshotDir);
|
|
14486
14486
|
let restored = 0;
|
|
14487
14487
|
for (const file of snapshotFiles) {
|
|
14488
14488
|
if (file.endsWith(".json")) {
|
|
14489
|
-
const content = await readFile4(
|
|
14490
|
-
await writeFile6(
|
|
14489
|
+
const content = await readFile4(join16(snapshotDir, file), "utf-8");
|
|
14490
|
+
await writeFile6(join16(worldPath, file), content, "utf-8");
|
|
14491
14491
|
restored++;
|
|
14492
14492
|
}
|
|
14493
14493
|
}
|
|
14494
14494
|
try {
|
|
14495
|
-
const rulesDir =
|
|
14495
|
+
const rulesDir = join16(snapshotDir, "rules");
|
|
14496
14496
|
const ruleFiles = await readdir(rulesDir);
|
|
14497
|
-
await mkdir3(
|
|
14497
|
+
await mkdir3(join16(worldPath, "rules"), { recursive: true });
|
|
14498
14498
|
for (const file of ruleFiles) {
|
|
14499
14499
|
if (file.endsWith(".json")) {
|
|
14500
|
-
const content = await readFile4(
|
|
14501
|
-
await writeFile6(
|
|
14500
|
+
const content = await readFile4(join16(rulesDir, file), "utf-8");
|
|
14501
|
+
await writeFile6(join16(worldPath, "rules", file), content, "utf-8");
|
|
14502
14502
|
restored++;
|
|
14503
14503
|
}
|
|
14504
14504
|
}
|
|
@@ -16400,27 +16400,27 @@ function closePrompts() {
|
|
|
16400
16400
|
}
|
|
16401
16401
|
function ask(question, defaultValue) {
|
|
16402
16402
|
const suffix = defaultValue ? ` [${defaultValue}]` : "";
|
|
16403
|
-
return new Promise((
|
|
16403
|
+
return new Promise((resolve8) => {
|
|
16404
16404
|
getRL().question(`
|
|
16405
16405
|
${question}${suffix}: `, (answer) => {
|
|
16406
16406
|
const val = answer.trim();
|
|
16407
|
-
|
|
16407
|
+
resolve8(val || defaultValue || "");
|
|
16408
16408
|
});
|
|
16409
16409
|
});
|
|
16410
16410
|
}
|
|
16411
16411
|
function confirm(question, defaultYes = true) {
|
|
16412
16412
|
const hint = defaultYes ? "[Y/n]" : "[y/N]";
|
|
16413
|
-
return new Promise((
|
|
16413
|
+
return new Promise((resolve8) => {
|
|
16414
16414
|
getRL().question(`
|
|
16415
16415
|
${question} ${hint}: `, (answer) => {
|
|
16416
16416
|
const val = answer.trim().toLowerCase();
|
|
16417
|
-
if (val === "")
|
|
16418
|
-
else
|
|
16417
|
+
if (val === "") resolve8(defaultYes);
|
|
16418
|
+
else resolve8(val === "y" || val === "yes");
|
|
16419
16419
|
});
|
|
16420
16420
|
});
|
|
16421
16421
|
}
|
|
16422
16422
|
function choose(question, options) {
|
|
16423
|
-
return new Promise((
|
|
16423
|
+
return new Promise((resolve8) => {
|
|
16424
16424
|
const r = getRL();
|
|
16425
16425
|
r.write(`
|
|
16426
16426
|
${question}
|
|
@@ -16430,9 +16430,9 @@ function choose(question, options) {
|
|
|
16430
16430
|
r.question(` Choice [1-${options.length}]: `, (answer) => {
|
|
16431
16431
|
const idx = parseInt(answer.trim(), 10) - 1;
|
|
16432
16432
|
if (idx >= 0 && idx < options.length) {
|
|
16433
|
-
|
|
16433
|
+
resolve8(options[idx]);
|
|
16434
16434
|
} else {
|
|
16435
|
-
|
|
16435
|
+
resolve8(options[0]);
|
|
16436
16436
|
}
|
|
16437
16437
|
});
|
|
16438
16438
|
});
|
|
@@ -16901,15 +16901,15 @@ function generateWorld(state) {
|
|
|
16901
16901
|
return { worldJson, stateSchema, guardsJson, rules, gatesJson, invariants, outcomes, metadata };
|
|
16902
16902
|
}
|
|
16903
16903
|
async function writeWorld(outputDir, world) {
|
|
16904
|
-
const { mkdirSync: mkdirSync5, existsSync:
|
|
16904
|
+
const { mkdirSync: mkdirSync5, existsSync: existsSync12 } = await import("fs");
|
|
16905
16905
|
const { writeFile: writeFile6 } = await import("fs/promises");
|
|
16906
|
-
const { join:
|
|
16906
|
+
const { join: join16 } = await import("path");
|
|
16907
16907
|
const files = [];
|
|
16908
|
-
if (!
|
|
16909
|
-
const rulesDir =
|
|
16910
|
-
if (!
|
|
16908
|
+
if (!existsSync12(outputDir)) mkdirSync5(outputDir, { recursive: true });
|
|
16909
|
+
const rulesDir = join16(outputDir, "rules");
|
|
16910
|
+
if (!existsSync12(rulesDir)) mkdirSync5(rulesDir, { recursive: true });
|
|
16911
16911
|
const writeJson = async (name, data) => {
|
|
16912
|
-
const path =
|
|
16912
|
+
const path = join16(outputDir, name);
|
|
16913
16913
|
await writeFile6(path, JSON.stringify(data, null, 2) + "\n", "utf-8");
|
|
16914
16914
|
files.push(path);
|
|
16915
16915
|
};
|
|
@@ -16921,7 +16921,7 @@ async function writeWorld(outputDir, world) {
|
|
|
16921
16921
|
await writeJson("outcomes.json", world.outcomes);
|
|
16922
16922
|
await writeJson("metadata.json", world.metadata);
|
|
16923
16923
|
for (const rule of world.rules) {
|
|
16924
|
-
const rulePath =
|
|
16924
|
+
const rulePath = join16(rulesDir, `${rule.id}.json`);
|
|
16925
16925
|
await writeFile6(rulePath, JSON.stringify(rule, null, 2) + "\n", "utf-8");
|
|
16926
16926
|
files.push(rulePath);
|
|
16927
16927
|
}
|
|
@@ -17008,7 +17008,7 @@ async function main31(argv = process.argv.slice(2)) {
|
|
|
17008
17008
|
}
|
|
17009
17009
|
async function phaseRefine(outputDir, world) {
|
|
17010
17010
|
const { writeFile: writeFile6 } = await import("fs/promises");
|
|
17011
|
-
const { join:
|
|
17011
|
+
const { join: join16 } = await import("path");
|
|
17012
17012
|
heading("Refinement: Thresholds & Collapse");
|
|
17013
17013
|
const primaryMetric = world.gatesJson.viability_classification[0]?.field || "system_health";
|
|
17014
17014
|
info(`
|
|
@@ -17025,7 +17025,7 @@ async function phaseRefine(outputDir, world) {
|
|
|
17025
17025
|
if (!isNaN(parsed)) gate.value = parsed;
|
|
17026
17026
|
}
|
|
17027
17027
|
await writeFile6(
|
|
17028
|
-
|
|
17028
|
+
join16(outputDir, "gates.json"),
|
|
17029
17029
|
JSON.stringify(world.gatesJson, null, 2) + "\n",
|
|
17030
17030
|
"utf-8"
|
|
17031
17031
|
);
|
|
@@ -17047,7 +17047,7 @@ async function phaseRefine(outputDir, world) {
|
|
|
17047
17047
|
result: "MODEL_COLLAPSES"
|
|
17048
17048
|
};
|
|
17049
17049
|
await writeFile6(
|
|
17050
|
-
|
|
17050
|
+
join16(outputDir, "rules", `${rule.id}.json`),
|
|
17051
17051
|
JSON.stringify(rule, null, 2) + "\n",
|
|
17052
17052
|
"utf-8"
|
|
17053
17053
|
);
|
|
@@ -17151,22 +17151,22 @@ ${parts.join(". ")}.
|
|
|
17151
17151
|
`;
|
|
17152
17152
|
}
|
|
17153
17153
|
function previewLens(lens) {
|
|
17154
|
-
const
|
|
17155
|
-
const
|
|
17154
|
+
const BOLD4 = "\x1B[1m";
|
|
17155
|
+
const DIM4 = "\x1B[2m";
|
|
17156
17156
|
const CYAN3 = "\x1B[36m";
|
|
17157
|
-
const
|
|
17157
|
+
const YELLOW4 = "\x1B[33m";
|
|
17158
17158
|
const GREEN3 = "\x1B[32m";
|
|
17159
|
-
const
|
|
17159
|
+
const RESET4 = "\x1B[0m";
|
|
17160
17160
|
const lines = [];
|
|
17161
17161
|
lines.push("");
|
|
17162
|
-
lines.push(`${
|
|
17163
|
-
lines.push(`${
|
|
17162
|
+
lines.push(`${BOLD4}${CYAN3} ${lens.name}${RESET4} ${DIM4}\u2014 ${lens.tagline}${RESET4}`);
|
|
17163
|
+
lines.push(`${DIM4} ${lens.description}${RESET4}`);
|
|
17164
17164
|
lines.push("");
|
|
17165
17165
|
for (const d of lens.directives) {
|
|
17166
17166
|
if (d.example) {
|
|
17167
|
-
lines.push(` ${
|
|
17168
|
-
lines.push(` ${
|
|
17169
|
-
lines.push(` ${GREEN3}With:${
|
|
17167
|
+
lines.push(` ${BOLD4}${d.id}${RESET4}`);
|
|
17168
|
+
lines.push(` ${YELLOW4}Without:${RESET4} ${DIM4}${d.example.without}${RESET4}`);
|
|
17169
|
+
lines.push(` ${GREEN3}With:${RESET4} ${d.example.with}`);
|
|
17170
17170
|
lines.push("");
|
|
17171
17171
|
}
|
|
17172
17172
|
}
|
|
@@ -18100,9 +18100,9 @@ async function cmdAdd(argv) {
|
|
|
18100
18100
|
id = name.toLowerCase().replace(/[^a-z0-9]+/g, "_").replace(/^_|_$/g, "");
|
|
18101
18101
|
}
|
|
18102
18102
|
const { readFile: readFile4, writeFile: writeFile6 } = await import("fs/promises");
|
|
18103
|
-
const { join:
|
|
18103
|
+
const { join: join16 } = await import("path");
|
|
18104
18104
|
const possiblePaths = [
|
|
18105
|
-
|
|
18105
|
+
join16(worldPath, "world.nv-world.md"),
|
|
18106
18106
|
worldPath
|
|
18107
18107
|
];
|
|
18108
18108
|
let mdPath = "";
|
|
@@ -18123,7 +18123,7 @@ async function cmdAdd(argv) {
|
|
|
18123
18123
|
const files = await readdir(worldPath);
|
|
18124
18124
|
const mdFile = files.find((f) => f.endsWith(".nv-world.md"));
|
|
18125
18125
|
if (mdFile) {
|
|
18126
|
-
mdPath =
|
|
18126
|
+
mdPath = join16(worldPath, mdFile);
|
|
18127
18127
|
mdContent = await readFile4(mdPath, "utf-8");
|
|
18128
18128
|
}
|
|
18129
18129
|
} catch {
|
|
@@ -19867,113 +19867,2182 @@ Examples:
|
|
|
19867
19867
|
}
|
|
19868
19868
|
});
|
|
19869
19869
|
|
|
19870
|
+
// src/radiant/lenses/auki-builder.ts
|
|
19871
|
+
function aukiBuilderRewrite(pattern) {
|
|
19872
|
+
if (pattern.evidence.cited_invariant) {
|
|
19873
|
+
return {
|
|
19874
|
+
...pattern,
|
|
19875
|
+
framing: "invariant pressure",
|
|
19876
|
+
emphasis: "worldmodel invariant cited by this observation \u2014 surface the cross-reference",
|
|
19877
|
+
compress: true
|
|
19878
|
+
};
|
|
19879
|
+
}
|
|
19880
|
+
if (pattern.type === "candidate") {
|
|
19881
|
+
return {
|
|
19882
|
+
...pattern,
|
|
19883
|
+
framing: "emergent observation (not yet in worldmodel)",
|
|
19884
|
+
emphasis: "candidate pattern \u2014 surface the vanguard-domain analysis (which domain activated this?)",
|
|
19885
|
+
compress: true
|
|
19886
|
+
};
|
|
19887
|
+
}
|
|
19888
|
+
return {
|
|
19889
|
+
...pattern,
|
|
19890
|
+
framing: "system-level consequence",
|
|
19891
|
+
emphasis: "coordination + leverage",
|
|
19892
|
+
compress: true
|
|
19893
|
+
};
|
|
19894
|
+
}
|
|
19895
|
+
var AUKI_VANGUARD_FRAME, AUKI_VOCABULARY, AUKI_VOICE, AUKI_FORBIDDEN_PHRASES, AUKI_PREFERRED_PATTERNS, AUKI_STRATEGIC_PATTERNS, AUKI_EXEMPLARS, aukiBuilderLens;
|
|
19896
|
+
var init_auki_builder = __esm({
|
|
19897
|
+
"src/radiant/lenses/auki-builder.ts"() {
|
|
19898
|
+
"use strict";
|
|
19899
|
+
AUKI_VANGUARD_FRAME = {
|
|
19900
|
+
domains: [
|
|
19901
|
+
"future-foresight",
|
|
19902
|
+
"narrative-dynamics",
|
|
19903
|
+
"shared-prosperity"
|
|
19904
|
+
],
|
|
19905
|
+
overlaps: [
|
|
19906
|
+
{
|
|
19907
|
+
domains: ["future-foresight", "narrative-dynamics"],
|
|
19908
|
+
emergent_state: "Inspiration",
|
|
19909
|
+
description: "Visionary leaders inspire action by painting a vivid picture of a better future, helping people understand how to get there together. Emerges when long-range thinking meets language that rallies."
|
|
19910
|
+
},
|
|
19911
|
+
{
|
|
19912
|
+
domains: ["narrative-dynamics", "shared-prosperity"],
|
|
19913
|
+
emergent_state: "Trust",
|
|
19914
|
+
description: "Built through authentic storytelling and consistent delivery on promises, creating a community where contributors feel secure in their contributions. Emerges when clear intent meets fair distribution \u2014 coalitions form here."
|
|
19915
|
+
},
|
|
19916
|
+
{
|
|
19917
|
+
domains: ["shared-prosperity", "future-foresight"],
|
|
19918
|
+
emergent_state: "Hope",
|
|
19919
|
+
description: "Propels decentralized communities toward a collective future where resources are equitably distributed and success is shared by all. Emerges when long-term infrastructure is architected for collective benefit \u2014 the DePIN / Intercognitive posture."
|
|
19920
|
+
}
|
|
19921
|
+
],
|
|
19922
|
+
center_identity: "Collective Vanguard Leader",
|
|
19923
|
+
evaluation_questions: [
|
|
19924
|
+
"What long-range architectural thinking is present? Systems design, scenario planning, critical thinking, ethical judgment \u2014 which of these is visible, which is weak?",
|
|
19925
|
+
"What communication and meaning-making is happening? Storytelling, cultural sensitivity, audience engagement, persuasive writing \u2014 who is telling the story of how the pieces connect?",
|
|
19926
|
+
"What collaborative and fairness work is happening? Stakeholder management, partnership development, incentive alignment, community building \u2014 who is building coalitions and making sure value flows equitably?",
|
|
19927
|
+
"Which overlap states surface \u2014 Inspiration (vision + narrative), Trust (narrative + fairness), Hope (fairness + long-term thinking)?",
|
|
19928
|
+
"Is the integration complete (Collective Vanguard Leader manifests across all three dimensions) or is one dimension absent / weak?",
|
|
19929
|
+
"If one dimension is weak, what specific skill inside it is the lowest-friction activation point?"
|
|
19930
|
+
],
|
|
19931
|
+
scoring_rubric: `For any Auki activity, identify which specific skills are strongly present, which are weak, which are absent. Cite specific evidence for each. Name the overlap emergent states that surface using their plain-English names (Inspiration, Trust, Hope). Do not surface the bucket names (Future Foresight, Narrative Dynamics, Shared Prosperity) in the output \u2014 those are internal reasoning scaffolds, not reader-facing labels. Translate bucket-level findings into skill-level observations: not "Future Foresight is present" but "the architectural thinking is strong \u2014 the systems design is clear"; not "Shared Prosperity is weak" but "partnership development is missing" or "incentive alignment hasn't been established." Center identity (Collective Vanguard Leader) may be named sparingly, only when all three dimensions fully integrate.`,
|
|
19932
|
+
/**
|
|
19933
|
+
* The skills inside each domain. These are the OUTPUT-FACING vocabulary —
|
|
19934
|
+
* the observable behaviors and capabilities readers understand. When the
|
|
19935
|
+
* AI renders findings, it uses these skill names, not the bucket names.
|
|
19936
|
+
*
|
|
19937
|
+
* From Kirsten\'s original vanguard diagram (see exemplars/vanguard-diagram).
|
|
19938
|
+
*/
|
|
19939
|
+
domain_skills: {
|
|
19940
|
+
"future-foresight": [
|
|
19941
|
+
"strategic thinking",
|
|
19942
|
+
"systems design",
|
|
19943
|
+
"scenario planning",
|
|
19944
|
+
"futurism and trend analysis",
|
|
19945
|
+
"critical thinking",
|
|
19946
|
+
"innovative problem-solving",
|
|
19947
|
+
"data-driven decision-making",
|
|
19948
|
+
"ethical judgment and governance",
|
|
19949
|
+
"risk assessment and mitigation",
|
|
19950
|
+
"curiosity and open-mindedness"
|
|
19951
|
+
],
|
|
19952
|
+
"narrative-dynamics": [
|
|
19953
|
+
"storytelling and narrative crafting",
|
|
19954
|
+
"behavioral psychology and memetics",
|
|
19955
|
+
"emotional intelligence",
|
|
19956
|
+
"communication and presentation skills",
|
|
19957
|
+
"cultural sensitivity and adaptation",
|
|
19958
|
+
"social media and viral messaging strategy",
|
|
19959
|
+
"brand building and positioning",
|
|
19960
|
+
"persuasive writing",
|
|
19961
|
+
"visualization and design thinking",
|
|
19962
|
+
"audience analysis and engagement"
|
|
19963
|
+
],
|
|
19964
|
+
"shared-prosperity": [
|
|
19965
|
+
"stakeholder management",
|
|
19966
|
+
"collaborative leadership",
|
|
19967
|
+
"conflict resolution and mediation",
|
|
19968
|
+
"economic and tokenomic design",
|
|
19969
|
+
"incentive alignment",
|
|
19970
|
+
"community building and management",
|
|
19971
|
+
"inclusivity and equity advocacy",
|
|
19972
|
+
"partnership development",
|
|
19973
|
+
"transparency and accountability",
|
|
19974
|
+
"negotiation and diplomacy"
|
|
19975
|
+
]
|
|
19976
|
+
},
|
|
19977
|
+
/**
|
|
19978
|
+
* The translation rule: bucket names stay internal; skills + overlap
|
|
19979
|
+
* state names surface in output. This is enforced by both the
|
|
19980
|
+
* output-directive (guidance to the AI) and the forbidden-phrases list
|
|
19981
|
+
* (renderer-level rejection of any output leaking bucket names).
|
|
19982
|
+
*/
|
|
19983
|
+
output_translation: {
|
|
19984
|
+
never_surface_in_output: [
|
|
19985
|
+
"Future Foresight",
|
|
19986
|
+
"Narrative Dynamics",
|
|
19987
|
+
"Shared Prosperity"
|
|
19988
|
+
],
|
|
19989
|
+
surface_freely: [
|
|
19990
|
+
"Inspiration",
|
|
19991
|
+
"Trust",
|
|
19992
|
+
"Hope"
|
|
19993
|
+
// plus any specific skill name from domain_skills above
|
|
19994
|
+
],
|
|
19995
|
+
surface_sparingly: ["Collective Vanguard Leader"],
|
|
19996
|
+
translation_examples: [
|
|
19997
|
+
{
|
|
19998
|
+
internal_reasoning: "Future Foresight is strong",
|
|
19999
|
+
external_expression: "the architectural thinking is strong; the systems design is clear"
|
|
20000
|
+
},
|
|
20001
|
+
{
|
|
20002
|
+
internal_reasoning: "Shared Prosperity is weak",
|
|
20003
|
+
external_expression: "partnership development is missing; no one has established incentive alignment across teams"
|
|
20004
|
+
},
|
|
20005
|
+
{
|
|
20006
|
+
internal_reasoning: "Narrative Dynamics is absent",
|
|
20007
|
+
external_expression: "no one is telling the story of how these pieces connect; the audience does not see the shared vision yet"
|
|
20008
|
+
}
|
|
20009
|
+
]
|
|
20010
|
+
}
|
|
20011
|
+
};
|
|
20012
|
+
AUKI_VOCABULARY = {
|
|
20013
|
+
proper_nouns: [
|
|
20014
|
+
"$AUKI",
|
|
20015
|
+
"Posemesh",
|
|
20016
|
+
"Auki Labs",
|
|
20017
|
+
"Posemesh Foundation",
|
|
20018
|
+
"Intercognitive Foundation",
|
|
20019
|
+
"Intercognitive",
|
|
20020
|
+
"Sixth Protocol",
|
|
20021
|
+
"Fifth Protocol",
|
|
20022
|
+
"DePIN",
|
|
20023
|
+
"Cactus",
|
|
20024
|
+
"Terri",
|
|
20025
|
+
"Mech Jagger",
|
|
20026
|
+
"peaq",
|
|
20027
|
+
"Mawari",
|
|
20028
|
+
"GEODNET",
|
|
20029
|
+
"Nine Pillars of AI Accessibility",
|
|
20030
|
+
"the real world web",
|
|
20031
|
+
"the posemesh"
|
|
20032
|
+
],
|
|
20033
|
+
// Generic term → Auki-native replacement
|
|
20034
|
+
preferred: {
|
|
20035
|
+
device: "participant",
|
|
20036
|
+
client: "participant",
|
|
20037
|
+
"coordinate system": "domain",
|
|
20038
|
+
"QR code for calibration": "portal",
|
|
20039
|
+
"work request": "task",
|
|
20040
|
+
"location alignment": "calibrate",
|
|
20041
|
+
"sensor reading": "observation",
|
|
20042
|
+
"physical environment": "environment",
|
|
20043
|
+
"the network (public-facing)": "the real world web",
|
|
20044
|
+
"the network (technical)": "the posemesh",
|
|
20045
|
+
"coordination between devices": "spatial orchestration",
|
|
20046
|
+
"buying services": "burning tokens for credits",
|
|
20047
|
+
"full autonomy": "the full stack",
|
|
20048
|
+
"non-GPS environments": "GPS-denied environments",
|
|
20049
|
+
"our partners": "the Intercognitive coalition (Auki, peaq, Mawari, GEODNET)"
|
|
20050
|
+
},
|
|
20051
|
+
architecture: [
|
|
20052
|
+
"domain",
|
|
20053
|
+
"domain cluster",
|
|
20054
|
+
"domain manager",
|
|
20055
|
+
"domain owner",
|
|
20056
|
+
"semantic layer",
|
|
20057
|
+
"topography layer",
|
|
20058
|
+
"rendering layer",
|
|
20059
|
+
"partitions",
|
|
20060
|
+
"observations",
|
|
20061
|
+
"portals",
|
|
20062
|
+
"participant",
|
|
20063
|
+
"supply participant",
|
|
20064
|
+
"demand participant",
|
|
20065
|
+
"capabilities",
|
|
20066
|
+
"tasks",
|
|
20067
|
+
"discovery service",
|
|
20068
|
+
"DHT",
|
|
20069
|
+
"substrate",
|
|
20070
|
+
"spatial orchestration",
|
|
20071
|
+
"app-free navigation",
|
|
20072
|
+
"marker-free VPS",
|
|
20073
|
+
"spatially aware",
|
|
20074
|
+
"the stack",
|
|
20075
|
+
"the robotics stack",
|
|
20076
|
+
"GPS-denied",
|
|
20077
|
+
"locomotion",
|
|
20078
|
+
"manipulation",
|
|
20079
|
+
"spatio-semantic perception",
|
|
20080
|
+
"mapping",
|
|
20081
|
+
"positioning",
|
|
20082
|
+
"hybrid robotics",
|
|
20083
|
+
"AI copilot",
|
|
20084
|
+
"shared spatial layer"
|
|
20085
|
+
],
|
|
20086
|
+
economic: [
|
|
20087
|
+
"burn",
|
|
20088
|
+
"credit",
|
|
20089
|
+
"deflationary mint",
|
|
20090
|
+
"reputation",
|
|
20091
|
+
"vacancy",
|
|
20092
|
+
"treasury",
|
|
20093
|
+
"utilization rate",
|
|
20094
|
+
"initial supply",
|
|
20095
|
+
"total supply",
|
|
20096
|
+
"organization",
|
|
20097
|
+
"trustless",
|
|
20098
|
+
"peer-to-peer transactions",
|
|
20099
|
+
"machine passports",
|
|
20100
|
+
"machine economy"
|
|
20101
|
+
],
|
|
20102
|
+
framing: [
|
|
20103
|
+
"machine perception",
|
|
20104
|
+
"spatial computing",
|
|
20105
|
+
"collaborative perception",
|
|
20106
|
+
"cognitive liberty",
|
|
20107
|
+
"perception-first",
|
|
20108
|
+
"protocol-not-product",
|
|
20109
|
+
"sovereignty",
|
|
20110
|
+
"decentralization",
|
|
20111
|
+
"territory capture",
|
|
20112
|
+
"foundations-before-execution",
|
|
20113
|
+
"make the world machine-readable",
|
|
20114
|
+
"connective tissue between digital and physical",
|
|
20115
|
+
"open, permissionless, interoperable, private",
|
|
20116
|
+
"skip the bottleneck, ship the leverage",
|
|
20117
|
+
"coalition before standard",
|
|
20118
|
+
"hybrid over pure",
|
|
20119
|
+
"augmentation without surveillance",
|
|
20120
|
+
"civilization-scale infrastructure",
|
|
20121
|
+
"public good, not proprietary asset",
|
|
20122
|
+
"Inspiration",
|
|
20123
|
+
"Trust",
|
|
20124
|
+
"Hope",
|
|
20125
|
+
"Collective Vanguard Leader"
|
|
20126
|
+
],
|
|
20127
|
+
// System-internal concepts → plain English for output.
|
|
20128
|
+
// Readers don't know Radiant's vocabulary. Speaking it to them is jargon.
|
|
20129
|
+
jargon_translations: {
|
|
20130
|
+
"worldmodel": "your strategy file",
|
|
20131
|
+
"canonical pattern": "something Radiant tracks by name over time",
|
|
20132
|
+
"candidate pattern": "something Radiant noticed but hasn't been told to watch for",
|
|
20133
|
+
"evidence gate": "how much activity Radiant needs before it speaks",
|
|
20134
|
+
"invariant": "a rule you declared non-negotiable",
|
|
20135
|
+
"signal extraction": "reading the activity",
|
|
20136
|
+
"alignment score": "how aligned the work is with what you said matters",
|
|
20137
|
+
"actor domain": "who did the work (a person, an AI, or both together)",
|
|
20138
|
+
"presence-based averaging": "only counts what actually happened",
|
|
20139
|
+
"drift detection": "noticing when things are shifting from what you said you wanted",
|
|
20140
|
+
"lens rewrite": "framing adjustment before output",
|
|
20141
|
+
"INSUFFICIENT_EVIDENCE": "not enough to say confidently",
|
|
20142
|
+
"UNAVAILABLE": "we can't measure this yet"
|
|
20143
|
+
}
|
|
20144
|
+
};
|
|
20145
|
+
AUKI_VOICE = {
|
|
20146
|
+
register: 'diagnosis mode \u2014 compressed, strategic, builder-direct. Closer to the closing paragraph of an Auki year-recap ("2025 was foundations. 2026 is execution.") than to its month-by-month celebration.',
|
|
20147
|
+
active_voice: "required",
|
|
20148
|
+
specificity: "required",
|
|
20149
|
+
hype_vocabulary: "forbidden",
|
|
20150
|
+
hedging: "forbidden",
|
|
20151
|
+
playfulness: "rare",
|
|
20152
|
+
close_with_strategic_frame: "preferred",
|
|
20153
|
+
punchline_move: "sparing",
|
|
20154
|
+
honesty_about_failure: "required",
|
|
20155
|
+
output_translation: `Reason internally through the three-domain frame (Future Foresight, Narrative Dynamics, Shared Prosperity) \u2014 that is the analytical scaffold. Express findings externally in the skills vocabulary INSIDE each domain (e.g. "strategic thinking," "partnership development," "storytelling," "incentive alignment"). Use the overlap state names (Inspiration, Trust, Hope) as plain-English emergent feelings. Do NOT surface the bucket names themselves (Future Foresight, Narrative Dynamics, Shared Prosperity) as labels in output \u2014 they are the model-maker's scaffold, not reader vocabulary. Readers understand skills, not buckets. The bucket names are in the forbidden_phrases list; the renderer will fail output that leaks them. Collective Vanguard Leader may be named sparingly when all three dimensions are fully integrated.`
|
|
20156
|
+
};
|
|
20157
|
+
AUKI_FORBIDDEN_PHRASES = Object.freeze([
|
|
20158
|
+
// Domain bucket names — never surface to readers; translate to skills
|
|
20159
|
+
"future foresight",
|
|
20160
|
+
"narrative dynamics",
|
|
20161
|
+
"shared prosperity",
|
|
20162
|
+
// AI-assistant hedging
|
|
20163
|
+
"it may be beneficial to consider",
|
|
20164
|
+
"there appears to be",
|
|
20165
|
+
"one possible interpretation",
|
|
20166
|
+
"it might be worth exploring",
|
|
20167
|
+
"it might be worth considering",
|
|
20168
|
+
"consider whether",
|
|
20169
|
+
"it is worth noting",
|
|
20170
|
+
"please note that",
|
|
20171
|
+
"it should be noted",
|
|
20172
|
+
"in conclusion",
|
|
20173
|
+
// Corporate / marketing
|
|
20174
|
+
"unparalleled",
|
|
20175
|
+
"best-in-class",
|
|
20176
|
+
"industry-leading",
|
|
20177
|
+
"revolutionary",
|
|
20178
|
+
"cutting-edge",
|
|
20179
|
+
"state-of-the-art",
|
|
20180
|
+
"thrilled to announce",
|
|
20181
|
+
"excited to share",
|
|
20182
|
+
"game-changing",
|
|
20183
|
+
"synergy",
|
|
20184
|
+
"synergies",
|
|
20185
|
+
"stakeholders",
|
|
20186
|
+
// too corporate; prefer named actors
|
|
20187
|
+
"end-users",
|
|
20188
|
+
"value proposition",
|
|
20189
|
+
"paradigm shift",
|
|
20190
|
+
// Generic motion
|
|
20191
|
+
"going forward",
|
|
20192
|
+
"moving forward",
|
|
20193
|
+
"at the end of the day",
|
|
20194
|
+
"touching base",
|
|
20195
|
+
"circle back",
|
|
20196
|
+
"deep dive",
|
|
20197
|
+
"level set",
|
|
20198
|
+
"low-hanging fruit"
|
|
20199
|
+
]);
|
|
20200
|
+
AUKI_PREFERRED_PATTERNS = Object.freeze([
|
|
20201
|
+
// Direct declarative observation
|
|
20202
|
+
"[Specific skill] is strong here. [Named evidence].",
|
|
20203
|
+
"[Specific skill] is breaking here. [Named evidence].",
|
|
20204
|
+
"[Specific skill] is missing. [Named consequence].",
|
|
20205
|
+
// Skills-level diagnosis (replaces the bucket-speak pattern)
|
|
20206
|
+
"The [specific skill] is clear \u2014 [specific evidence]. But [another specific skill] is missing \u2014 [specific effect]. [Imperative move].",
|
|
20207
|
+
"What is missing is [specific skill], not effort.",
|
|
20208
|
+
"[Trust | Inspiration | Hope] won't emerge until [skill-A] and [skill-B] happen together.",
|
|
20209
|
+
// Imperative move
|
|
20210
|
+
"Force [action] or [consequence].",
|
|
20211
|
+
"Tighten this or it fragments.",
|
|
20212
|
+
"Skip the bottleneck, ship the leverage.",
|
|
20213
|
+
"Coalition before standard.",
|
|
20214
|
+
// Strategic close — list-becomes-argument (from year-recap)
|
|
20215
|
+
"Combine [A, B, C] and suddenly [strategic implication].",
|
|
20216
|
+
"[Phase A] was [what you built]. [Phase B] is [what you execute].",
|
|
20217
|
+
// Binary stakes (from Intercognitive)
|
|
20218
|
+
"[Centralize X in the hands of a few] or [build a decentralized alternative].",
|
|
20219
|
+
// Short thesis compression (from glossary)
|
|
20220
|
+
"[Subject] is [essential-function] \u2014 [one-line precision].",
|
|
20221
|
+
// Named specificity
|
|
20222
|
+
"[Named partner/place/number] is the one that matters here.",
|
|
20223
|
+
// Honest texture (from year-recap)
|
|
20224
|
+
"[Specific thing] is not yet [state] \u2014 [honest qualifier].",
|
|
20225
|
+
// Pivot to reality before solution (from hybrid robotics)
|
|
20226
|
+
"The truth is [current reality]. [Better approach] is [the move].",
|
|
20227
|
+
// Overlap-state compression (surfacing the emergent feel, not the buckets)
|
|
20228
|
+
"Trust is not emerging because [specific narrative skill] and [specific coalition skill] are not happening together.",
|
|
20229
|
+
"Inspiration is landing here \u2014 [specific evidence of vision + rally].",
|
|
20230
|
+
"Hope is present \u2014 [specific evidence of long-term thinking meeting fair distribution]."
|
|
20231
|
+
]);
|
|
20232
|
+
AUKI_STRATEGIC_PATTERNS = Object.freeze([
|
|
20233
|
+
"Skip the bottleneck, ship the leverage \u2014 identify the hard layers in the stack, build on the layers that deliver value now.",
|
|
20234
|
+
"Coalition before standard \u2014 form the group that will set the rules before the rules need to exist.",
|
|
20235
|
+
"Foundations before execution \u2014 build the infrastructure that makes the thing possible; then scale.",
|
|
20236
|
+
"Hybrid over pure \u2014 augment humans with AI where full autonomy is not ready; transition later.",
|
|
20237
|
+
"Decentralized > proprietary \u2014 when choosing architecture, prefer open / community-governed / interoperable over closed / owned / siloed.",
|
|
20238
|
+
"Layered analysis first, strategic move second \u2014 decompose before deciding.",
|
|
20239
|
+
'Named specificity over abstractions \u2014 cite people, places, partners, numbers; never "stakeholders" or "the industry."',
|
|
20240
|
+
"Community deployment before public release \u2014 validate with a small group of operators before opening the door.",
|
|
20241
|
+
"Cognitive liberty as inviolable constraint \u2014 block any move that violates sovereignty over spatial/sensor data, regardless of other benefits.",
|
|
20242
|
+
"Compress mission to one sentence \u2014 one memorable thesis carries more weight than a manifesto."
|
|
20243
|
+
]);
|
|
20244
|
+
AUKI_EXEMPLARS = Object.freeze([
|
|
20245
|
+
{
|
|
20246
|
+
path: "intercognitive-foundation.md",
|
|
20247
|
+
title: "The Intercognitive Foundation",
|
|
20248
|
+
exhibits: ["future-foresight", "narrative-dynamics", "shared-prosperity"],
|
|
20249
|
+
integration_quality: "full \u2014 all three domains integrated; Collective Vanguard Leader manifests through the coalition itself",
|
|
20250
|
+
notes: 'The perfect vanguard exemplar. Future Foresight: inflection-point framing, Nine Pillars architecture. Narrative Dynamics: "the physical world cannot remain a blind spot," rally language, invitation to join. Shared Prosperity: coalition of four founding members, "no single entity should own," community governance, public good framing. When Radiant outputs something that feels vanguard-complete, it should resemble this in structure and tone.'
|
|
20251
|
+
},
|
|
20252
|
+
{
|
|
20253
|
+
path: "hybrid-robotics-essay.md",
|
|
20254
|
+
title: "The Case for Hybrid Robotics",
|
|
20255
|
+
exhibits: ["future-foresight", "shared-prosperity"],
|
|
20256
|
+
integration_quality: "partial \u2014 Future Foresight dominant, Shared Prosperity secondary, Narrative Dynamics present but informing rather than rallying. Overlap: Hope emerges (long-horizon infrastructure for collective benefit).",
|
|
20257
|
+
notes: 'Auki teaching how it thinks. The stack-analysis \u2192 bottleneck-identification \u2192 skip-and-ship pattern is a reusable Auki reasoning move. When the AI applies "systems-first" and "leverage-oriented" thinking, it should resemble this essay \u2014 structured, honest about current reality, pivoting to a better approach via layered reasoning.'
|
|
20258
|
+
},
|
|
20259
|
+
{
|
|
20260
|
+
path: "glossary.md",
|
|
20261
|
+
title: "Auki Glossary",
|
|
20262
|
+
exhibits: ["future-foresight"],
|
|
20263
|
+
integration_quality: 'primary-dominant \u2014 Future Foresight dominant (precise technical definitions as long-range conceptual infrastructure). Shared Prosperity implicit (glossary is open, cross-referenced, serves the ecosystem). Narrative Dynamics flashes once ("a mesh of machines reasoning about pose") but is not primary.',
|
|
20264
|
+
notes: 'Source of the vocabulary map. Also teaches compression style: one-line precision definitions, cross-reference density, occasional poetic compression. When the renderer produces short thesis sentences, aim for the "mesh of machines reasoning about pose" level of compression.'
|
|
20265
|
+
},
|
|
20266
|
+
{
|
|
20267
|
+
path: "year-recap-2025.md",
|
|
20268
|
+
title: "Auki 2025 Year-End Recap",
|
|
20269
|
+
exhibits: ["narrative-dynamics", "shared-prosperity"],
|
|
20270
|
+
integration_quality: "partial \u2014 Narrative Dynamics dominant, Shared Prosperity strong, Future Foresight arrives only in the closing paragraph. Overlap: Trust emerges (stakeholders can see their place in the collective progress).",
|
|
20271
|
+
notes: 'The celebration register \u2014 warm, specific, named. Not the diagnosis register the lens primarily enforces, but the same DNA. Use this exemplar when calibrating how Auki names specifics (Pepito in Bali, Mika Haak at HQ, the HK web3 robotics cabal) and how the "\u2014 literally" punchline move lands. Do NOT mimic the celebration warmth in diagnosis outputs.'
|
|
20272
|
+
}
|
|
20273
|
+
]);
|
|
20274
|
+
aukiBuilderLens = {
|
|
20275
|
+
name: "auki-builder",
|
|
20276
|
+
description: "Renders behavioral interpretation through the vanguard leadership model \u2014 Future Foresight, Narrative Dynamics, Shared Prosperity. Role-based, not personal. Encodes how Auki-grade builders think and speak when the vanguard model is running. Companion to auki-vanguard.worldmodel.md (the abstract DNA) and the exemplars at src/radiant/examples/auki/exemplars/ (worked implementations).",
|
|
20277
|
+
primary_frame: {
|
|
20278
|
+
domains: AUKI_VANGUARD_FRAME.domains,
|
|
20279
|
+
overlaps: AUKI_VANGUARD_FRAME.overlaps,
|
|
20280
|
+
center_identity: AUKI_VANGUARD_FRAME.center_identity,
|
|
20281
|
+
evaluation_questions: AUKI_VANGUARD_FRAME.evaluation_questions,
|
|
20282
|
+
scoring_rubric: AUKI_VANGUARD_FRAME.scoring_rubric
|
|
20283
|
+
},
|
|
20284
|
+
vocabulary: AUKI_VOCABULARY,
|
|
20285
|
+
voice: AUKI_VOICE,
|
|
20286
|
+
forbidden_phrases: AUKI_FORBIDDEN_PHRASES,
|
|
20287
|
+
preferred_patterns: AUKI_PREFERRED_PATTERNS,
|
|
20288
|
+
strategic_patterns: AUKI_STRATEGIC_PATTERNS,
|
|
20289
|
+
exemplar_refs: AUKI_EXEMPLARS,
|
|
20290
|
+
rewrite: aukiBuilderRewrite
|
|
20291
|
+
};
|
|
20292
|
+
}
|
|
20293
|
+
});
|
|
20294
|
+
|
|
20295
|
+
// src/radiant/lenses/index.ts
|
|
20296
|
+
var lenses_exports = {};
|
|
20297
|
+
__export(lenses_exports, {
|
|
20298
|
+
LENSES: () => LENSES,
|
|
20299
|
+
aukiBuilderLens: () => aukiBuilderLens,
|
|
20300
|
+
getLens: () => getLens2,
|
|
20301
|
+
listLenses: () => listLenses
|
|
20302
|
+
});
|
|
20303
|
+
function getLens2(id) {
|
|
20304
|
+
return LENSES[id];
|
|
20305
|
+
}
|
|
20306
|
+
function listLenses() {
|
|
20307
|
+
return Object.freeze(Object.keys(LENSES));
|
|
20308
|
+
}
|
|
20309
|
+
var LENSES;
|
|
20310
|
+
var init_lenses = __esm({
|
|
20311
|
+
"src/radiant/lenses/index.ts"() {
|
|
20312
|
+
"use strict";
|
|
20313
|
+
init_auki_builder();
|
|
20314
|
+
init_auki_builder();
|
|
20315
|
+
LENSES = Object.freeze({
|
|
20316
|
+
"auki-builder": aukiBuilderLens
|
|
20317
|
+
});
|
|
20318
|
+
}
|
|
20319
|
+
});
|
|
20320
|
+
|
|
20321
|
+
// src/radiant/core/prompt.ts
|
|
20322
|
+
function composeSystemPrompt(worldmodelContent, lens) {
|
|
20323
|
+
const sections = [];
|
|
20324
|
+
sections.push(
|
|
20325
|
+
`## Worldmodel
|
|
20326
|
+
|
|
20327
|
+
You are operating inside a governed environment. The worldmodel below
|
|
20328
|
+
defines the invariants, signals, decision priorities, and behavioral
|
|
20329
|
+
expectations for this organization. Every response you produce must
|
|
20330
|
+
be grounded in this worldmodel.
|
|
20331
|
+
|
|
20332
|
+
` + worldmodelContent
|
|
20333
|
+
);
|
|
20334
|
+
const frame = lens.primary_frame;
|
|
20335
|
+
const questionsBlock = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
|
|
20336
|
+
const overlapsBlock = frame.overlaps.map(
|
|
20337
|
+
(o) => `- ${o.domains[0]} + ${o.domains[1]} = **${o.emergent_state}**: ${o.description}`
|
|
20338
|
+
).join("\n");
|
|
20339
|
+
sections.push(
|
|
20340
|
+
`## How to Think (Analytical Frame: ${lens.name})
|
|
20341
|
+
|
|
20342
|
+
${frame.scoring_rubric}
|
|
20343
|
+
|
|
20344
|
+
### Evaluation questions to reason through
|
|
20345
|
+
|
|
20346
|
+
${questionsBlock}
|
|
20347
|
+
|
|
20348
|
+
### Overlap emergent states
|
|
20349
|
+
|
|
20350
|
+
${overlapsBlock}
|
|
20351
|
+
|
|
20352
|
+
### Center identity
|
|
20353
|
+
|
|
20354
|
+
When all dimensions integrate fully: **${frame.center_identity}**. Surface this sparingly \u2014 only when the integration is genuinely complete.`
|
|
20355
|
+
);
|
|
20356
|
+
const vocabPreferred = Object.entries(lens.vocabulary.preferred).map(([generic, native]) => `- "${generic}" \u2192 **${native}**`).join("\n");
|
|
20357
|
+
const vocabArchitecture = lens.vocabulary.architecture.map((t) => `\`${t}\``).join(", ");
|
|
20358
|
+
const vocabProperNouns = lens.vocabulary.proper_nouns.map((n) => `**${n}**`).join(", ");
|
|
20359
|
+
const strategicBlock = lens.strategic_patterns.map((p) => `- ${p}`).join("\n");
|
|
20360
|
+
sections.push(
|
|
20361
|
+
`## How to Speak (Voice: ${lens.name})
|
|
20362
|
+
|
|
20363
|
+
Register: ${lens.voice.register}
|
|
20364
|
+
|
|
20365
|
+
Rules:
|
|
20366
|
+
- Active voice: ${lens.voice.active_voice}
|
|
20367
|
+
- Named specificity (people, places, numbers): ${lens.voice.specificity}
|
|
20368
|
+
- Hype vocabulary: ${lens.voice.hype_vocabulary}
|
|
20369
|
+
- Hedging / qualified phrasing: ${lens.voice.hedging}
|
|
20370
|
+
- Playfulness: ${lens.voice.playfulness}
|
|
20371
|
+
- Close with strategic frame: ${lens.voice.close_with_strategic_frame}
|
|
20372
|
+
- Honesty about failure: ${lens.voice.honesty_about_failure}
|
|
20373
|
+
|
|
20374
|
+
### Output translation discipline
|
|
20375
|
+
|
|
20376
|
+
${lens.voice.output_translation}
|
|
20377
|
+
|
|
20378
|
+
### Vocabulary
|
|
20379
|
+
|
|
20380
|
+
Proper nouns (use literally): ${vocabProperNouns}
|
|
20381
|
+
|
|
20382
|
+
Preferred term substitutions:
|
|
20383
|
+
${vocabPreferred}
|
|
20384
|
+
|
|
20385
|
+
Architecture vocabulary: ${vocabArchitecture}
|
|
20386
|
+
|
|
20387
|
+
### Strategic decision patterns
|
|
20388
|
+
|
|
20389
|
+
When recommending action, these patterns reflect how this organization resolves tradeoffs:
|
|
20390
|
+
|
|
20391
|
+
${strategicBlock}`
|
|
20392
|
+
);
|
|
20393
|
+
const forbiddenBlock = lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
|
|
20394
|
+
sections.push(
|
|
20395
|
+
`## Guardrails
|
|
20396
|
+
|
|
20397
|
+
Do NOT use any of these phrases in your response. If you catch yourself
|
|
20398
|
+
reaching for one, rephrase in direct, active, specific language instead.
|
|
20399
|
+
|
|
20400
|
+
${forbiddenBlock}
|
|
20401
|
+
|
|
20402
|
+
If your response would violate a worldmodel invariant, state the conflict
|
|
20403
|
+
explicitly and propose an alternative that honors the invariant.`
|
|
20404
|
+
);
|
|
20405
|
+
return sections.join("\n\n---\n\n");
|
|
20406
|
+
}
|
|
20407
|
+
var init_prompt = __esm({
|
|
20408
|
+
"src/radiant/core/prompt.ts"() {
|
|
20409
|
+
"use strict";
|
|
20410
|
+
}
|
|
20411
|
+
});
|
|
20412
|
+
|
|
20413
|
+
// src/radiant/core/voice-check.ts
|
|
20414
|
+
function checkForbiddenPhrases(lens, text) {
|
|
20415
|
+
const lower = text.toLowerCase();
|
|
20416
|
+
const violations = [];
|
|
20417
|
+
for (const phrase of lens.forbidden_phrases) {
|
|
20418
|
+
const phraseLower = phrase.toLowerCase();
|
|
20419
|
+
let pos = 0;
|
|
20420
|
+
while (true) {
|
|
20421
|
+
const idx = lower.indexOf(phraseLower, pos);
|
|
20422
|
+
if (idx === -1) break;
|
|
20423
|
+
violations.push({ phrase, offset: idx });
|
|
20424
|
+
pos = idx + phraseLower.length;
|
|
20425
|
+
}
|
|
20426
|
+
}
|
|
20427
|
+
violations.sort((a, b) => a.offset - b.offset);
|
|
20428
|
+
return violations;
|
|
20429
|
+
}
|
|
20430
|
+
var init_voice_check = __esm({
|
|
20431
|
+
"src/radiant/core/voice-check.ts"() {
|
|
20432
|
+
"use strict";
|
|
20433
|
+
}
|
|
20434
|
+
});
|
|
20435
|
+
|
|
20436
|
+
// src/radiant/commands/think.ts
|
|
20437
|
+
async function think(input) {
|
|
20438
|
+
const lens = resolveLens(input.lensId);
|
|
20439
|
+
const systemPrompt = composeSystemPrompt(input.worldmodelContent, lens);
|
|
20440
|
+
const response = await input.ai.complete(systemPrompt, input.query);
|
|
20441
|
+
const voiceViolations = checkForbiddenPhrases(lens, response);
|
|
20442
|
+
return {
|
|
20443
|
+
response,
|
|
20444
|
+
lens: lens.name,
|
|
20445
|
+
voiceViolations,
|
|
20446
|
+
voiceClean: voiceViolations.length === 0,
|
|
20447
|
+
systemPrompt
|
|
20448
|
+
};
|
|
20449
|
+
}
|
|
20450
|
+
function resolveLens(id) {
|
|
20451
|
+
const lens = getLens2(id);
|
|
20452
|
+
if (!lens) {
|
|
20453
|
+
const available = Object.keys(
|
|
20454
|
+
// Inline import-free way to list. At runtime, getLens returns from
|
|
20455
|
+
// the same LENSES record — we just need the keys for the error message.
|
|
20456
|
+
// We re-import getLens from lenses/index which exposes listLenses, but
|
|
20457
|
+
// since we already have lens===undefined we know the id was wrong.
|
|
20458
|
+
{}
|
|
20459
|
+
);
|
|
20460
|
+
throw new Error(
|
|
20461
|
+
`Lens "${id}" not found. Check the id or register the lens in src/radiant/lenses/index.ts.`
|
|
20462
|
+
);
|
|
20463
|
+
}
|
|
20464
|
+
return lens;
|
|
20465
|
+
}
|
|
20466
|
+
var init_think = __esm({
|
|
20467
|
+
"src/radiant/commands/think.ts"() {
|
|
20468
|
+
"use strict";
|
|
20469
|
+
init_lenses();
|
|
20470
|
+
init_prompt();
|
|
20471
|
+
init_voice_check();
|
|
20472
|
+
}
|
|
20473
|
+
});
|
|
20474
|
+
|
|
20475
|
+
// src/radiant/core/scopes.ts
|
|
20476
|
+
function parseRepoScope(scope) {
|
|
20477
|
+
const cleaned = scope.replace(/^https?:\/\//, "").replace(/^github\.com\//, "").replace(/\.git$/, "").replace(/\/$/, "");
|
|
20478
|
+
const parts = cleaned.split("/");
|
|
20479
|
+
if (parts.length < 2 || !parts[0] || !parts[1]) {
|
|
20480
|
+
throw new Error(
|
|
20481
|
+
`Cannot parse repo scope: "${scope}". Expected "owner/repo" or a GitHub URL.`
|
|
20482
|
+
);
|
|
20483
|
+
}
|
|
20484
|
+
return { owner: parts[0], repo: parts[1] };
|
|
20485
|
+
}
|
|
20486
|
+
/** Render a repo scope back to its canonical "owner/repo" form. */
function formatScope(scope) {
  const { owner, repo } = scope;
  return [owner, repo].join("/");
}
|
|
20489
|
+
// Lazy-init stub for src/radiant/core/scopes.ts — the module has no
// module-level state; the wrapper exists so other chunks can depend on it
// uniformly via init_scopes().
var init_scopes = __esm({
  "src/radiant/core/scopes.ts"() {
    "use strict";
  }
});
|
|
20494
|
+
|
|
20495
|
+
// src/radiant/adapters/github.ts
|
|
20496
|
+
/**
 * Fetch recent GitHub activity (commits, PRs, issue comments) for a repo
 * scope and normalize it into a single, chronologically sorted event list.
 *
 * @param {{owner: string, repo: string}} scope
 * @param {string} token - GitHub token sent as `token <...>` auth.
 * @param {{windowDays?: number, perPage?: number}} [options]
 * @returns {Promise<object[]>} Events sorted ascending by timestamp.
 */
async function fetchGitHubActivity(scope, token, options = {}) {
  const windowDays = options.windowDays ?? 14;
  const perPage = options.perPage ?? 100;
  const cutoff = new Date(Date.now() - windowDays * 24 * 60 * 60 * 1e3);
  const since = cutoff.toISOString();
  const base = `https://api.github.com/repos/${formatScope(scope)}`;
  const headers = {
    Authorization: `token ${token}`,
    Accept: "application/vnd.github.v3+json",
    "User-Agent": "neuroverseos-radiant"
  };
  // The three list endpoints are independent — fetch them in parallel.
  const [commits, prs, comments] = await Promise.all([
    fetchJSON(
      `${base}/commits?since=${since}&per_page=${perPage}`,
      headers
    ),
    fetchJSON(
      `${base}/pulls?state=all&sort=updated&direction=desc&per_page=${perPage}`,
      headers
    ),
    fetchJSON(
      `${base}/issues/comments?since=${since}&per_page=${perPage}&sort=updated&direction=desc`,
      headers
    )
  ]);
  // PRs have no server-side `since` filter, so they are filtered here by
  // updated_at; commits and comments were already filtered by the API.
  const events = [
    ...commits.map((c) => mapCommit(c, scope)),
    ...prs.filter((pr) => new Date(pr.updated_at) >= cutoff).map((pr) => mapPR(pr, scope)),
    ...comments.map((comment) => mapComment(comment, scope))
  ];
  return events.sort(
    (a, b) => Date.parse(a.timestamp) - Date.parse(b.timestamp)
  );
}
|
|
20540
|
+
/**
 * Normalize a GitHub commit payload into a Radiant event.
 * The primary actor comes from the GitHub user (falling back to the git
 * author name); AI/human co-authors are lifted from commit-message trailers.
 */
function mapCommit(c, scope) {
  const coActors = extractCoAuthors(c.commit.message);
  const event = {
    id: `commit-${c.sha.slice(0, 8)}`,
    timestamp: c.commit.author.date,
    actor: mapUser(c.author, c.commit.author.name),
    // Omit the field entirely (undefined) when there are no co-authors.
    coActors: coActors.length > 0 ? coActors : void 0,
    kind: "commit",
    content: c.commit.message,
    metadata: {
      scope: formatScope(scope),
      sha: c.sha
    }
  };
  return event;
}
|
|
20556
|
+
/**
 * Normalize a GitHub pull-request payload into a Radiant event.
 * When the PR was merged by someone other than its author, the merge
 * becomes the event (merger as actor) and the opening is recorded as the
 * thing being responded to.
 */
function mapPR(pr, scope) {
  let kind;
  if (pr.merged_at) kind = "pr_merged";
  else if (pr.state === "open") kind = "pr_opened";
  else kind = "pr_closed";
  const event = {
    id: `pr-${pr.number}`,
    timestamp: pr.created_at,
    actor: mapUser(pr.user),
    kind,
    content: `${pr.title}\n\n${pr.body ?? ""}`.trim(),
    metadata: {
      scope: formatScope(scope),
      pr_number: pr.number,
      state: pr.state,
      merged_at: pr.merged_at
    }
  };
  const mergedBySomeoneElse = pr.merged_by && pr.merged_by.login !== pr.user.login;
  if (mergedBySomeoneElse) {
    event.actor = mapUser(pr.merged_by);
    event.kind = "pr_merged";
    event.timestamp = pr.merged_at ?? pr.updated_at;
    // NOTE(review): "pr-<n>-opened" does not match any id this adapter
    // emits (PR events use plain "pr-<n>") — confirm whether downstream
    // follow-through linking expects this synthetic id.
    event.respondsTo = {
      eventId: `pr-${pr.number}-opened`,
      actor: mapUser(pr.user)
    };
  }
  return event;
}
|
|
20583
|
+
/**
 * Normalize a GitHub issue/PR comment into a Radiant event.
 * The issue number is parsed from the comment's issue_url; the comment is
 * linked back to "pr-<n>" with an unknown original actor (the API payload
 * does not include the thread author).
 */
function mapComment(comment, scope) {
  const numberMatch = comment.issue_url.match(/\/issues\/(\d+)$/);
  const issueNumber = numberMatch ? numberMatch[1] : "unknown";
  return {
    id: `comment-${comment.id}`,
    timestamp: comment.created_at,
    actor: mapUser(comment.user),
    kind: "comment",
    content: comment.body,
    respondsTo: {
      eventId: `pr-${issueNumber}`,
      actor: { id: "unknown", kind: "unknown" }
    },
    metadata: {
      scope: formatScope(scope),
      issue_number: issueNumber
    }
  };
}
|
|
20603
|
+
/**
 * Map a GitHub user object to a Radiant actor.
 * A missing user yields an "unknown" actor using the fallback name (e.g.
 * the raw git author name). Users typed "Bot", "[bot]" logins, and entries
 * in KNOWN_AI_LOGINS are all classified as kind "bot".
 */
function mapUser(ghUser, fallbackName) {
  if (!ghUser) {
    return {
      id: fallbackName ?? "unknown",
      kind: "unknown",
      name: fallbackName
    };
  }
  const login = ghUser.login;
  // NOTE(review): known-AI logins are flagged as "bot", not "ai" — confirm
  // that is intentional (co-author trailers use "ai").
  const isAutomation = ghUser.type === "Bot" || login.endsWith("[bot]") || KNOWN_AI_LOGINS.has(login.toLowerCase());
  return {
    id: login,
    kind: isAutomation ? "bot" : "human",
    name: login
  };
}
|
|
20624
|
+
/**
 * Extract co-author actors from "Co-authored-by: Name <email>" trailers in
 * a commit message. A co-author counts as AI when the lower-cased name
 * matches (exactly or as a substring) any entry in KNOWN_AI_CO_AUTHOR_NAMES.
 */
function extractCoAuthors(message) {
  const trailerPattern = /^Co-authored-by:\s*(.+?)\s*<([^>]*)>/i;
  const found = [];
  for (const line of message.split("\n")) {
    const m = line.match(trailerPattern);
    if (!m) continue;
    const displayName = m[1].trim();
    const lowered = displayName.toLowerCase();
    const isAI = KNOWN_AI_CO_AUTHOR_NAMES.has(lowered) || [...KNOWN_AI_CO_AUTHOR_NAMES].some((ai) => lowered.includes(ai));
    found.push({
      // Prefer the email as a stable id; fall back to the lower-cased name.
      id: m[2] || lowered,
      kind: isAI ? "ai" : "human",
      name: displayName
    });
  }
  return found;
}
|
|
20643
|
+
/**
 * GET a GitHub API URL and parse the JSON response.
 *
 * @param {string} url
 * @param {object} headers - Request headers (auth, accept, user-agent).
 * @returns {Promise<any[]>} Parsed JSON; an empty array on 404 so callers
 *   can treat a missing resource as "no activity".
 * @throws {Error} Dedicated message on rate-limit 403s; generic message
 *   (status + body excerpt) for every other failure.
 */
async function fetchJSON(url, headers) {
  const res = await fetch(url, { headers });
  if (res.ok) return await res.json();
  if (res.status === 404) return [];
  // Fix: the response body is single-use. The original called res.text()
  // once inside the 403 branch and AGAIN in the generic throw below it, so
  // a non-rate-limit 403 crashed with "body unusable" instead of reporting
  // the real API error. Read the body exactly once.
  const body = await res.text();
  if (res.status === 403 && body.includes("rate limit")) {
    throw new Error(
      `GitHub API rate limit exceeded. Wait or use a token with higher limits.`
    );
  }
  throw new Error(
    `GitHub API error ${res.status} for ${url}: ${body.slice(0, 300)}`
  );
}
|
|
20661
|
+
// Actor-classification lookup tables, assigned by init_github2().
var KNOWN_AI_LOGINS, KNOWN_AI_CO_AUTHOR_NAMES;
// Lazy-init for src/radiant/adapters/github.ts: wires in the scope helpers
// and populates the bot/AI lookup sets used by mapUser/extractCoAuthors.
var init_github2 = __esm({
  "src/radiant/adapters/github.ts"() {
    "use strict";
    init_scopes();
    // GitHub logins (lower-cased before lookup) treated as automation.
    KNOWN_AI_LOGINS = /* @__PURE__ */ new Set([
      "github-actions[bot]",
      "dependabot[bot]",
      "renovate[bot]",
      "copilot"
    ]);
    // Lower-cased names matched (exact or substring) against
    // "Co-authored-by:" trailers to flag AI co-authors.
    KNOWN_AI_CO_AUTHOR_NAMES = /* @__PURE__ */ new Set([
      "claude",
      "copilot",
      "cursor",
      "codeium",
      "tabnine",
      "codex"
    ]);
  }
});
|
|
20682
|
+
|
|
20683
|
+
// src/radiant/adapters/exocortex.ts
|
|
20684
|
+
/**
 * Load the "stated intent" context files from an exocortex directory.
 * Each slot reads the first non-empty candidate path; unreadable or empty
 * files are skipped silently (best-effort). Returns the loaded text per
 * slot (or null), plus the resolved source dir and a loaded-file count.
 */
function readExocortex(dirPath) {
  const root = (0, import_path13.resolve)(dirPath);
  let loadedCount = 0;
  const firstReadable = (...candidates) => {
    for (const rel of candidates) {
      const absolute = (0, import_path13.join)(root, rel);
      if (!(0, import_fs12.existsSync)(absolute)) continue;
      try {
        const text = (0, import_fs12.readFileSync)(absolute, "utf-8").trim();
        if (text) {
          loadedCount += 1;
          return text;
        }
      } catch {
        // unreadable file — fall through to the next candidate
      }
    }
    return null;
  };
  return {
    attention: firstReadable("attention.md"),
    goals: firstReadable("goals.md"),
    identity: firstReadable("identity.md"),
    sprint: firstReadable("sprint.md", "src/sprint.md"),
    organization: firstReadable("org/organization.md", "org/src/organization.md"),
    methods: firstReadable("org/methods.md", "org/src/methods.md"),
    source: root,
    filesLoaded: loadedCount
  };
}
|
|
20715
|
+
/**
 * Render loaded exocortex context as the "Stated Intent" prompt section.
 * Empty string when nothing was loaded; otherwise an intro paragraph plus
 * one "### <heading>" subsection per populated slot, joined by blank lines.
 */
function formatExocortexForPrompt(ctx) {
  if (ctx.filesLoaded === 0) return "";
  const sections = [
    "## Stated Intent (from exocortex)\n\nThe following is what the person/team SAYS they are doing, focused on, and working toward. Compare this against the ACTUAL activity from GitHub. Where stated intent and observed behavior diverge, that gap is the most valuable signal in this read. Name it directly."
  ];
  // Slot order matters: attention, goals, sprint, identity, org, methods.
  const slots = [
    ["attention", "Current attention"],
    ["goals", "Goals"],
    ["sprint", "Sprint focus"],
    ["identity", "Identity and values"],
    ["organization", "Organization"],
    ["methods", "Methods"]
  ];
  for (const [key, heading] of slots) {
    const body = ctx[key];
    if (body) sections.push(`### ${heading}\n\n${body}`);
  }
  return sections.join("\n\n");
}
|
|
20753
|
+
/** One-line summary of which exocortex slots loaded, for CLI status output. */
function summarizeExocortex(ctx) {
  if (ctx.filesLoaded === 0) return "no exocortex files found";
  const slots = [
    ["attention", "attention"],
    ["goals", "goals"],
    ["sprint", "sprint"],
    ["identity", "identity"],
    ["organization", "org"],
    ["methods", "methods"]
  ];
  const present = slots.filter(([key]) => ctx[key]).map(([, label]) => label);
  return `${present.join(", ")} (${ctx.filesLoaded} files)`;
}
|
|
20764
|
+
// CommonJS fs/path bindings for the exocortex adapter, assigned by
// init_exocortex().
var import_fs12, import_path13;
// Lazy-init for src/radiant/adapters/exocortex.ts: binds the node fs/path
// modules used by readExocortex.
var init_exocortex = __esm({
  "src/radiant/adapters/exocortex.ts"() {
    "use strict";
    import_fs12 = require("fs");
    import_path13 = require("path");
  }
});
|
|
20772
|
+
|
|
20773
|
+
// src/radiant/core/domain.ts
|
|
20774
|
+
/** True when the actor kind sits on the human ("life") side of the boundary. */
function isLifeSide(k) {
  return k === "human" || k === "unknown";
}
/** True when the actor kind sits on the machine ("cyber") side. */
function isCyberSide(k) {
  return k === "ai" || k === "bot";
}
/** True when the two kinds sit on opposite sides of the human/machine line. */
function crossesBoundary(a, b) {
  if (isLifeSide(a)) return isCyberSide(b);
  return isCyberSide(a) && isLifeSide(b);
}
/**
 * Classify an event as "life", "cyber", or "joint".
 * Joint when the actor set (primary + co-actors) spans both sides, or when
 * the event responds across the boundary; otherwise the primary actor's side.
 */
function classifyActorDomain(event) {
  const kinds = [event.actor.kind];
  for (const co of event.coActors ?? []) kinds.push(co.kind);
  if (kinds.some(isLifeSide) && kinds.some(isCyberSide)) {
    return "joint";
  }
  const reply = event.respondsTo;
  if (reply && crossesBoundary(event.actor.kind, reply.actor.kind)) {
    return "joint";
  }
  return isCyberSide(event.actor.kind) ? "cyber" : "life";
}
|
|
20797
|
+
// Lazy-init stub for src/radiant/core/domain.ts — no module-level state;
// exists so dependents can call init_domain() uniformly.
var init_domain = __esm({
  "src/radiant/core/domain.ts"() {
    "use strict";
  }
});
|
|
20802
|
+
|
|
20803
|
+
// src/radiant/core/signals.ts
|
|
20804
|
+
/** Tag each raw event with its life/cyber/joint domain classification. */
function classifyEvents(events) {
  const classified = [];
  for (const event of events) {
    classified.push({ event, domain: classifyActorDomain(event) });
  }
  return classified;
}
|
|
20810
|
+
/**
 * Run every signal extractor over every domain, producing one flat list of
 * {id, domain, score, eventCount, confidence} entries in extractor-major,
 * domain-minor order.
 *
 * @param {Array} events - Classified events ({event, domain}).
 * @param {Array} [extractors] - Defaults to DEFAULT_SIGNAL_EXTRACTORS.
 */
function extractSignals(events, extractors = DEFAULT_SIGNAL_EXTRACTORS) {
  const DOMAINS = ["life", "cyber", "joint"];
  const results = [];
  for (const extractor of extractors) {
    for (const domain of DOMAINS) {
      const { score, eventCount, confidence } = extractor.extract(events, domain);
      results.push({ id: extractor.id, domain, score, eventCount, confidence });
    }
  }
  return results;
}
|
|
20827
|
+
/** Subset of classified events belonging to the given domain. */
function inDomain(events, domain) {
  const matching = [];
  for (const classified of events) {
    if (classified.domain === domain) matching.push(classified);
  }
  return matching;
}
|
|
20830
|
+
/** Map an event count to confidence in [0, 1]: linear up to 10 events. */
function confidenceFromCount(count) {
  return count >= 10 ? 1 : count / 10;
}
|
|
20833
|
+
/** Clamp a numeric score into the [0, 100] range. */
function clamp100(n) {
  return n > 100 ? 100 : n < 0 ? 0 : n;
}
|
|
20838
|
+
// Signal-extractor singletons, assigned once by init_signals(). Each
// extractor scores one behavioral signal (0-100) over the classified
// events of a single domain and reports eventCount/confidence so callers
// can evidence-gate the result.
var ZERO, CLARITY_EXTRACTOR, OWNERSHIP_EXTRACTOR, FOLLOW_THROUGH_EXTRACTOR, ALIGNMENT_EXTRACTOR, DECISION_MOMENTUM_EXTRACTOR, DEFAULT_SIGNAL_EXTRACTORS;
// Lazy-init for src/radiant/core/signals.ts (esbuild __esm wrapper).
var init_signals = __esm({
  "src/radiant/core/signals.ts"() {
    "use strict";
    init_domain();
    // Shared result for a domain that has no events at all.
    ZERO = { score: 0, eventCount: 0, confidence: 0 };
    // clarity: mean content length per event, normalized so that 200+
    // characters maps to 100.
    CLARITY_EXTRACTOR = {
      id: "clarity",
      description: "Informativeness of event content \u2014 commit messages, PR bodies, review text",
      extract(events, domain) {
        const sub = inDomain(events, domain);
        if (sub.length === 0) return ZERO;
        const totalScore = sub.reduce((acc, e) => {
          const len = (e.event.content ?? "").length;
          const norm = Math.min(len, 200) / 200;
          return acc + norm * 100;
        }, 0);
        return {
          score: clamp100(totalScore / sub.length),
          eventCount: sub.length,
          confidence: confidenceFromCount(sub.length)
        };
      }
    };
    // ownership: percentage of events whose primary actor is attributed
    // (kind !== "unknown").
    OWNERSHIP_EXTRACTOR = {
      id: "ownership",
      description: "Clarity of accountability \u2014 fraction of events with a known primary actor",
      extract(events, domain) {
        const sub = inDomain(events, domain);
        if (sub.length === 0) return ZERO;
        const attributed = sub.filter((e) => e.event.actor.kind !== "unknown").length;
        return {
          score: clamp100(attributed / sub.length * 100),
          eventCount: sub.length,
          confidence: confidenceFromCount(sub.length)
        };
      }
    };
    // follow_through: percentage of this domain's events that a later
    // event (from ANY domain) responded to.
    FOLLOW_THROUGH_EXTRACTOR = {
      id: "follow_through",
      description: "Fraction of events that were followed up \u2014 i.e. referenced by a later event",
      extract(events, domain) {
        const sub = inDomain(events, domain);
        if (sub.length === 0) return ZERO;
        const referencedIds = /* @__PURE__ */ new Set();
        for (const e of events) {
          const ref = e.event.respondsTo?.eventId;
          if (ref) referencedIds.add(ref);
        }
        const followedUp = sub.filter((e) => referencedIds.has(e.event.id)).length;
        return {
          score: clamp100(followedUp / sub.length * 100),
          eventCount: sub.length,
          confidence: confidenceFromCount(sub.length)
        };
      }
    };
    // alignment: percentage of this domain's events that themselves
    // reference a prior event.
    ALIGNMENT_EXTRACTOR = {
      id: "alignment",
      description: "Coordination pressure \u2014 fraction of events that reference a prior event",
      extract(events, domain) {
        const sub = inDomain(events, domain);
        if (sub.length === 0) return ZERO;
        const referencing = sub.filter((e) => e.event.respondsTo !== void 0).length;
        return {
          score: clamp100(referencing / sub.length * 100),
          eventCount: sub.length,
          confidence: confidenceFromCount(sub.length)
        };
      }
    };
    // decision_momentum: events per day over the observed span (floored at
    // one hour to avoid divide-by-tiny), normalized so 10+/day maps to 100.
    DECISION_MOMENTUM_EXTRACTOR = {
      id: "decision_momentum",
      description: "Rate of activity in this domain \u2014 events per day, capped at 10/day",
      extract(events, domain) {
        const sub = inDomain(events, domain);
        if (sub.length === 0) return ZERO;
        if (sub.length < 2) {
          return {
            score: 20,
            // token non-zero score — single event = some motion
            eventCount: sub.length,
            confidence: confidenceFromCount(sub.length)
          };
        }
        const ts = sub.map((e) => Date.parse(e.event.timestamp)).sort((a, b) => a - b);
        const spanMs = ts[ts.length - 1] - ts[0];
        const spanDays = Math.max(spanMs / (24 * 60 * 60 * 1e3), 1 / 24);
        const perDay = sub.length / spanDays;
        const normalized = Math.min(perDay, 10) / 10;
        return {
          score: clamp100(normalized * 100),
          eventCount: sub.length,
          confidence: confidenceFromCount(sub.length)
        };
      }
    };
    // Frozen default extractor set consumed by extractSignals().
    DEFAULT_SIGNAL_EXTRACTORS = Object.freeze([
      CLARITY_EXTRACTOR,
      OWNERSHIP_EXTRACTOR,
      FOLLOW_THROUGH_EXTRACTOR,
      ALIGNMENT_EXTRACTOR,
      DECISION_MOMENTUM_EXTRACTOR
    ]);
  }
});
|
|
20944
|
+
|
|
20945
|
+
// src/radiant/types.ts
|
|
20946
|
+
/**
 * Narrow a score-or-sentinel value: numbers are real scores, while string
 * sentinels ("INSUFFICIENT_EVIDENCE", "UNAVAILABLE") are not.
 */
function isScored(s) {
  const isNumeric = typeof s === "number";
  return isNumeric;
}
|
|
20949
|
+
// Default evidence gate, assigned by init_types().
var DEFAULT_EVIDENCE_GATE;
// Lazy-init for src/radiant/types.ts.
var init_types = __esm({
  "src/radiant/types.ts"() {
    "use strict";
    // An observation only counts as "present" with at least k=3 events and
    // confidence >= c=0.5 (consumed by isPresent in core/math).
    DEFAULT_EVIDENCE_GATE = { k: 3, c: 0.5 };
  }
});
|
|
20956
|
+
|
|
20957
|
+
// src/radiant/core/math.ts
|
|
20958
|
+
/**
 * Evidence gating: an observation counts only when backed by enough events
 * (gate.k) and enough confidence (gate.c).
 * @param {{eventCount: number, confidence: number}} o
 * @param {{k: number, c: number}} [gate]
 * @returns {boolean}
 */
function isPresent(o, gate = DEFAULT_EVIDENCE_GATE) {
  if (o.eventCount < gate.k) return false;
  return o.confidence >= gate.c;
}
/**
 * Mean score of the gate-passing items, or the "INSUFFICIENT_EVIDENCE"
 * sentinel when none pass the gate.
 */
function presenceAverage(items, gate = DEFAULT_EVIDENCE_GATE) {
  const admitted = items.filter((item) => isPresent(item, gate));
  if (admitted.length === 0) return "INSUFFICIENT_EVIDENCE";
  let total = 0;
  for (const item of admitted) total += item.score;
  return total / admitted.length;
}
/** Alignment score for human ("life") work: gated mean over its dimensions. */
function scoreLife(capability, gate = DEFAULT_EVIDENCE_GATE) {
  const { dimensions } = capability;
  return presenceAverage(dimensions, gate);
}
/** Alignment score for AI ("cyber") work: gated mean over its dimensions. */
function scoreCyber(capability, gate = DEFAULT_EVIDENCE_GATE) {
  const { dimensions } = capability;
  return presenceAverage(dimensions, gate);
}
/**
 * Collaboration ("NeuroVerse") score: "UNAVAILABLE" when no worldmodel was
 * loaded; otherwise the gated mean over the components.
 */
function scoreNeuroVerse(components, worldmodelLoaded, gate = DEFAULT_EVIDENCE_GATE) {
  if (!worldmodelLoaded) return "UNAVAILABLE";
  return presenceAverage(components, gate);
}
/**
 * Composite of whichever axis scores are numeric (life, cyber, neuroverse),
 * or "INSUFFICIENT_EVIDENCE" when all three are sentinels.
 */
function scoreComposite(a_L, a_C, a_N) {
  const numeric = [a_L, a_C, a_N].filter(isScored);
  if (numeric.length === 0) return "INSUFFICIENT_EVIDENCE";
  let total = 0;
  for (const s of numeric) total += s;
  return total / numeric.length;
}
|
|
20985
|
+
// Lazy-init for src/radiant/core/math.ts: pulls in the types module so
// DEFAULT_EVIDENCE_GATE is assigned before the scoring functions run.
var init_math = __esm({
  "src/radiant/core/math.ts"() {
    "use strict";
    init_types();
  }
});
|
|
20991
|
+
|
|
20992
|
+
// src/radiant/core/patterns.ts
|
|
20993
|
+
/**
 * Ask the AI to interpret the measured signals/events through the lens and
 * return the structured read (patterns, meaning, move) plus the raw
 * response for audit.
 */
async function interpretPatterns(input) {
  const systemPrompt = buildInterpretationPrompt(input);
  const rawResponse = await input.ai.complete(
    systemPrompt,
    "Analyze the activity and produce the read."
  );
  const { patterns, meaning, move } = parseInterpretation(
    rawResponse,
    input.canonicalPatterns ?? []
  );
  return { patterns, meaning, move, raw_ai_response: rawResponse };
}
|
|
21004
|
+
/**
 * Assemble the system prompt for the interpretation call: worldmodel
 * context, measured signal matrix, event sample, lens-specific reasoning
 * questions, voice guidance, jargon translations, the JSON output schema,
 * and the lens's forbidden phrases.
 *
 * Note: no comments may appear inside the template literal below — they
 * would become part of the prompt string.
 */
function buildInterpretationPrompt(input) {
  const signalSummary = formatSignalSummary(input.signals);
  // Only the most recent 30 events are shown to the model.
  const eventSample = formatEventSample(input.events, 30);
  const canonicalList = (input.canonicalPatterns ?? []).length > 0 ? `Patterns the organization has already named (use these names if you see them):
${input.canonicalPatterns.map((p) => `- ${p}`).join("\n")}` : "No patterns have been named yet. Everything you observe is new.";
  const frame = input.lens.primary_frame;
  const evalQuestions = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
  const forbiddenList = input.lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
  const jargonTable = Object.entries(input.lens.vocabulary.jargon_translations).map(([internal, plain]) => `  "${internal}" \u2192 "${plain}"`).join("\n");
  return `You are a behavioral intelligence system reading team activity and producing a read for the reader who needs to act on it.

## Context the reader has loaded

${input.worldmodelContent}

## What happened this window

### Signal matrix (what Radiant measured)

${signalSummary}

### Recent events (sample)

${eventSample}

## How to reason

Reason through these questions INTERNALLY \u2014 do not list them in your output:

${evalQuestions}

Scoring rubric: ${frame.scoring_rubric}

${canonicalList}

${input.statedIntent ? input.statedIntent + "\n" : ""}## Voice: speak like an Auki builder, not like a status report

The reader wants to know **what this means and what to do**, not "what happened." Frame every observation as consequence + implication, not just description.

Wrong voice (status report):
"Rapid deployment of complex technical architecture through composable commits."
"Signal extraction across life, cyber, and joint domains enables consistent behavioral analysis."
"Decision momentum scores suggest architectural delivery without corresponding strategic direction setting."

Right voice (Auki builder):
"Shipping pace is high. The architecture is getting ahead of strategic decisions \u2014 velocity without a declared target."
"Every pattern is new. Nothing is being tracked by name yet. That's fine for now; it becomes a problem when patterns repeat and you still don't have vocabulary for them."
"The work is converging across three modules. The story of HOW they compose isn't being told yet."

The difference: consequence in plain English, not observation in system vocabulary.

## Translate internal jargon to plain English

Readers don't know Radiant's vocabulary. Before ANY description appears in your output, translate these:

${jargonTable}

For example: don't say "update the worldmodel." Say "add a line to your strategy file."

## Health is a valid read

If the activity is healthy and aligned with the worldmodel, SAY SO. Don't fabricate problems. Over-prescription is a voice failure. Legitimate outputs include:

"Nothing's broken. Keep shipping."
"This is what healthy looks like \u2014 the invariants are holding."
"Nothing here needs action."

Only recommend a move when the evidence actually calls for one.

## Output schema \u2014 JSON object

\`\`\`json
{
  "patterns": [
    {
      "name": "pattern_name_snake_case",
      "type": "canonical" | "candidate",
      "description": "Consequence-framed, plain-English, 1-2 sentences. The reader understands why this matters, not just what you observed.",
      "evidence": {
        "signals": ["signal_id.domain", ...],
        "events": ["event_id", ...],
        "cited_invariant": "invariant_name_or_null"
      },
      "confidence": 0.0 to 1.0
    }
  ],
  "meaning": "3-5 sentences. Weave the patterns into ONE strategic thesis. Compress. The reader should finish this paragraph and understand the one thing that matters most in this read. Plain English \u2014 no system jargon.",
  "move": "1-3 direct imperatives, OR explicit 'nothing to act on' if the read is healthy. Do not fabricate urgency. Examples: 'Force cross-module ownership this sprint.' / 'Nothing's broken. Keep shipping.' / 'If you want future reads to track this pattern by name, add a line to your strategy file.'"
}
\`\`\`

## Hard rules

- Every signal you cite MUST appear in the signal matrix above
- Every event you cite MUST appear in the events sample above
- Do not invent signals or events that aren't in the data
- Candidate patterns must have type "candidate"
- No hedging, no hype vocabulary
- Apply jargon translation before output
- Health-is-valid \u2014 don't invent problems
- Return ONLY the JSON object, no other text

Do NOT use these phrases anywhere in your output:
${forbiddenList}`;
}
|
|
21109
|
+
/**
 * Render the signal matrix as plain text, grouped by domain. Each row is
 * marked ✓ when it passes the default evidence gate (>=3 events and
 * confidence >= 0.5), ○ otherwise; empty domains are omitted.
 */
function formatSignalSummary(signals) {
  const out = [];
  for (const domain of ["life", "cyber", "joint"]) {
    const rows = signals.filter((s) => s.domain === domain);
    if (rows.length === 0) continue;
    out.push(`### ${domain}`);
    for (const s of rows) {
      const passesGate = s.eventCount >= 3 && s.confidence >= 0.5;
      const marker = passesGate ? "\u2713" : "\u25CB";
      out.push(
        `  ${marker} ${s.id}: score=${s.score.toFixed(1)}, events=${s.eventCount}, conf=${s.confidence.toFixed(2)}`
      );
    }
  }
  return out.join("\n");
}
|
|
21125
|
+
/**
 * Render the most recent `maxEvents` classified events as a bulleted text
 * sample: one header line per event (domain, id, actor, kind, optional
 * responds-to link) followed by a 200-char content excerpt.
 */
function formatEventSample(events, maxEvents) {
  const lines = [];
  for (const classified of events.slice(-maxEvents)) {
    const { event } = classified;
    const excerpt = (event.content ?? "").slice(0, 200);
    const replySuffix = event.respondsTo ? ` (responds to ${event.respondsTo.eventId})` : "";
    lines.push(
      `- [${classified.domain}] ${event.id} | ${event.actor.kind}:${event.actor.id} | ${event.kind ?? "event"}${replySuffix}\n  "${excerpt}"`
    );
  }
  return lines.join("\n");
}
|
|
21134
|
+
/**
 * Parse the AI's raw interpretation text into {patterns, meaning, move}.
 * Primary path: extract the JSON object containing "patterns". Fallback:
 * a bare JSON array of patterns. Each pattern is normalized (name,
 * canonical/candidate type, evidence arrays, confidence clamped to [0,1]
 * with 0.5 default); anything unparseable degrades to empty results.
 */
function parseInterpretation(raw, canonicalNames) {
  let meaning = "";
  let move = "";
  let rawPatterns = [];
  const objectText = raw.match(/\{[\s\S]*"patterns"[\s\S]*\}/);
  if (objectText) {
    try {
      const parsed = JSON.parse(objectText[0]);
      if (Array.isArray(parsed.patterns)) {
        rawPatterns = parsed.patterns;
      }
      if (typeof parsed.meaning === "string") meaning = parsed.meaning;
      if (typeof parsed.move === "string") move = parsed.move;
    } catch {
      // malformed JSON object — fall through to the bare-array fallback
    }
  }
  if (rawPatterns.length === 0) {
    const arrayText = raw.match(/\[[\s\S]*\]/);
    if (arrayText) {
      try {
        const parsed = JSON.parse(arrayText[0]);
        if (Array.isArray(parsed)) rawPatterns = parsed;
      } catch {
        // not valid JSON either — leave rawPatterns empty
      }
    }
  }
  const canonicalSet = new Set(canonicalNames.map((n) => n.toLowerCase()));
  const patterns = [];
  for (const item of rawPatterns) {
    if (!isPatternLike(item)) continue;
    const name = String(item.name ?? "unnamed");
    const evidence = item.evidence;
    const canonical = item.type === "canonical" || canonicalSet.has(name.toLowerCase());
    patterns.push({
      name,
      type: canonical ? "canonical" : "candidate",
      declaredAs: canonical ? name : void 0,
      description: String(item.description ?? ""),
      evidence: {
        signals: Array.isArray(evidence?.signals) ? evidence.signals.map(String) : [],
        events: Array.isArray(evidence?.events) ? evidence.events.map(String) : [],
        cited_invariant: evidence?.cited_invariant ? String(evidence.cited_invariant) : void 0
      },
      confidence: typeof item.confidence === "number" ? Math.max(0, Math.min(1, item.confidence)) : 0.5
    });
  }
  return { patterns, meaning, move };
}
/** Minimal shape check: a pattern candidate is any object with a "name" key. */
function isPatternLike(x) {
  return typeof x === "object" && x !== null && "name" in x;
}
|
|
21185
|
+
// Lazy-init stub for src/radiant/core/patterns.ts — no module-level state.
var init_patterns = __esm({
  "src/radiant/core/patterns.ts"() {
    "use strict";
  }
});
|
|
21190
|
+
|
|
21191
|
+
// src/radiant/core/renderer.ts
|
|
21192
|
+
/** Produce both renderings of a read: human text plus YAML-ish frontmatter. */
function render(input) {
  return {
    text: renderText(input),
    frontmatter: renderFrontmatter(input)
  };
}
|
|
21197
|
+
/**
 * Render the full human-readable read: a scope/window/lens header, the
 * EMERGENT pattern block (canonical first, then candidates), MEANING and
 * MOVE paragraphs (indented two spaces), the ALIGNMENT score table, and
 * the DEPTH footer. Sections are joined by blank lines.
 *
 * Comments may only appear outside template literals here — anything
 * inside a template becomes part of the rendered output.
 */
function renderText(input) {
  const sections = [];
  sections.push(
    `Scope: ${formatScope(input.scope)}
Window: last ${input.windowDays} days \xB7 ${input.eventCount} events
Lens: ${input.lens.name}`
  );
  if (input.patterns.length > 0) {
    const canonical = input.patterns.filter((p) => p.type === "canonical");
    const candidates = input.patterns.filter((p) => p.type === "candidate");
    let emergentBlock = "EMERGENT\n";
    if (canonical.length > 0) {
      for (const p of canonical) {
        emergentBlock += `
  ${p.name}
`;
        emergentBlock += `    ${p.description}
`;
      }
    }
    if (candidates.length > 0) {
      emergentBlock += "\n  Emergent (candidates \u2014 not yet in worldmodel)\n";
      for (const p of candidates) {
        emergentBlock += `
  ${p.name} (candidate)
`;
        emergentBlock += `    ${p.description}
`;
        if (p.evidence.cited_invariant) {
          emergentBlock += `    Cited invariant: ${p.evidence.cited_invariant}
`;
        }
      }
    }
    sections.push(emergentBlock.trimEnd());
  }
  if (input.meaning) {
    sections.push(`MEANING

  ${input.meaning.split("\n").join("\n  ")}`);
  }
  if (input.move) {
    sections.push(`MOVE

  ${input.move.split("\n").join("\n  ")}`);
  }
  // formatScore (defined elsewhere in this bundle) renders numeric scores
  // and the INSUFFICIENT_EVIDENCE / UNAVAILABLE sentinels.
  const alignBlock = [
    "ALIGNMENT",
    "",
    `  Human work: ${formatScore(input.scores.A_L)}`,
    `  AI work: ${formatScore(input.scores.A_C)}`,
    `  Human\u2013AI collaboration: ${formatScore(input.scores.A_N)}`,
    `  Composite: ${formatScore(input.scores.R)}`
  ].join("\n");
  sections.push(alignBlock);
  sections.push(renderDepth(input.priorReadCount ?? 0, input.windowDays));
  return sections.join("\n\n");
}
|
|
21255
|
+
function renderDepth(priorReads, windowDays) {
|
|
21256
|
+
if (priorReads === 0) {
|
|
21257
|
+
return [
|
|
21258
|
+
"DEPTH",
|
|
21259
|
+
"",
|
|
21260
|
+
` This is your first read. Radiant sees ${windowDays} days of activity`,
|
|
21261
|
+
" but has no prior baseline to compare against.",
|
|
21262
|
+
"",
|
|
21263
|
+
" Available now:",
|
|
21264
|
+
" \u2713 Signal extraction across life / cyber / joint domains",
|
|
21265
|
+
" \u2713 Pattern identification (canonical + candidates)",
|
|
21266
|
+
" \u2713 Alignment scoring",
|
|
21267
|
+
"",
|
|
21268
|
+
" Available after 2+ reads:",
|
|
21269
|
+
" \xB7 Drift detection (is alignment improving or degrading?)",
|
|
21270
|
+
' \xB7 Baselines (what does "normal" look like for this team?)',
|
|
21271
|
+
" \xB7 Pattern confidence (are these patterns persistent or noise?)",
|
|
21272
|
+
" \xB7 Evolution proposals (should the worldmodel adapt?)",
|
|
21273
|
+
"",
|
|
21274
|
+
" Run again next week. The read gets sharper every time."
|
|
21275
|
+
].join("\n");
|
|
21276
|
+
}
|
|
21277
|
+
if (priorReads < 4) {
|
|
21278
|
+
return [
|
|
21279
|
+
"DEPTH",
|
|
21280
|
+
"",
|
|
21281
|
+
` Read ${priorReads + 1} of this scope. Baseline forming.`,
|
|
21282
|
+
"",
|
|
21283
|
+
" Available now:",
|
|
21284
|
+
" \u2713 Signal extraction + pattern identification + alignment scoring",
|
|
21285
|
+
` \u2713 Drift detection (comparing against ${priorReads} prior read${priorReads > 1 ? "s" : ""})`,
|
|
21286
|
+
" \xB7 Baselines stabilizing (need 4+ reads for reliable averages)",
|
|
21287
|
+
" \xB7 Pattern confidence accumulating",
|
|
21288
|
+
"",
|
|
21289
|
+
" The read sharpens with each run."
|
|
21290
|
+
].join("\n");
|
|
21291
|
+
}
|
|
21292
|
+
return [
|
|
21293
|
+
"DEPTH",
|
|
21294
|
+
"",
|
|
21295
|
+
` Read ${priorReads + 1} of this scope. Baseline established.`,
|
|
21296
|
+
"",
|
|
21297
|
+
" Available:",
|
|
21298
|
+
" \u2713 Signal extraction + pattern identification + alignment scoring",
|
|
21299
|
+
" \u2713 Drift detection against established baseline",
|
|
21300
|
+
" \u2713 Pattern confidence (persistent vs noise)",
|
|
21301
|
+
" \u2713 Evolution proposals (candidate patterns with enough history to evaluate)"
|
|
21302
|
+
].join("\n");
|
|
21303
|
+
}
|
|
21304
|
+
function formatScore(s) {
|
|
21305
|
+
if (!isScored(s)) {
|
|
21306
|
+
if (s === "UNAVAILABLE") return "not available (no worldmodel loaded)";
|
|
21307
|
+
return "not enough signal to call yet";
|
|
21308
|
+
}
|
|
21309
|
+
const n = Math.round(s);
|
|
21310
|
+
let label;
|
|
21311
|
+
if (n >= 75) label = "STRONG";
|
|
21312
|
+
else if (n >= 60) label = "STABLE";
|
|
21313
|
+
else if (n >= 45) label = "needs attention";
|
|
21314
|
+
else if (n >= 30) label = "concerning";
|
|
21315
|
+
else label = "critical";
|
|
21316
|
+
return `${n} \xB7 ${label}`;
|
|
21317
|
+
}
|
|
21318
|
+
function renderFrontmatter(input) {
|
|
21319
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
21320
|
+
const signalsByDomain = groupSignalsByDomain(input.signals);
|
|
21321
|
+
const patternEntries = input.patterns.map((p) => {
|
|
21322
|
+
const entry = {
|
|
21323
|
+
name: p.name,
|
|
21324
|
+
type: p.type,
|
|
21325
|
+
conf: Number(p.confidence.toFixed(2)),
|
|
21326
|
+
evidence_signals: p.evidence.signals,
|
|
21327
|
+
evidence_events: p.evidence.events
|
|
21328
|
+
};
|
|
21329
|
+
if (p.evidence.cited_invariant) {
|
|
21330
|
+
entry.cited_invariant = p.evidence.cited_invariant;
|
|
21331
|
+
}
|
|
21332
|
+
return entry;
|
|
21333
|
+
});
|
|
21334
|
+
const frontmatter = {
|
|
21335
|
+
radiant_read: {
|
|
21336
|
+
scope: formatScope(input.scope),
|
|
21337
|
+
window: `${input.windowDays}d`,
|
|
21338
|
+
timestamp: now,
|
|
21339
|
+
lens: input.lens.name
|
|
21340
|
+
},
|
|
21341
|
+
events: {
|
|
21342
|
+
total: input.eventCount
|
|
21343
|
+
},
|
|
21344
|
+
signals: signalsByDomain,
|
|
21345
|
+
scores: {
|
|
21346
|
+
A_L: isScored(input.scores.A_L) ? Math.round(input.scores.A_L) : String(input.scores.A_L),
|
|
21347
|
+
A_C: isScored(input.scores.A_C) ? Math.round(input.scores.A_C) : String(input.scores.A_C),
|
|
21348
|
+
A_N: isScored(input.scores.A_N) ? Math.round(input.scores.A_N) : String(input.scores.A_N),
|
|
21349
|
+
R: isScored(input.scores.R) ? Math.round(input.scores.R) : String(input.scores.R)
|
|
21350
|
+
},
|
|
21351
|
+
patterns: patternEntries
|
|
21352
|
+
};
|
|
21353
|
+
return "---\n" + serializeYAML(frontmatter) + "---";
|
|
21354
|
+
}
|
|
21355
|
+
function groupSignalsByDomain(signals) {
|
|
21356
|
+
const result = {};
|
|
21357
|
+
for (const s of signals) {
|
|
21358
|
+
if (!result[s.domain]) result[s.domain] = {};
|
|
21359
|
+
result[s.domain][s.id] = {
|
|
21360
|
+
score: Number(s.score.toFixed(1)),
|
|
21361
|
+
n: s.eventCount,
|
|
21362
|
+
conf: Number(s.confidence.toFixed(2))
|
|
21363
|
+
};
|
|
21364
|
+
}
|
|
21365
|
+
return result;
|
|
21366
|
+
}
|
|
21367
|
+
function serializeYAML(obj, indent = 0) {
|
|
21368
|
+
const pad = " ".repeat(indent);
|
|
21369
|
+
if (obj === null || obj === void 0) return "null\n";
|
|
21370
|
+
if (typeof obj === "string") return `${JSON.stringify(obj)}
|
|
21371
|
+
`;
|
|
21372
|
+
if (typeof obj === "number" || typeof obj === "boolean") return `${obj}
|
|
21373
|
+
`;
|
|
21374
|
+
if (Array.isArray(obj)) {
|
|
21375
|
+
if (obj.length === 0) return "[]\n";
|
|
21376
|
+
if (obj.every((item) => typeof item === "string" || typeof item === "number")) {
|
|
21377
|
+
return `[${obj.map((item) => JSON.stringify(item)).join(", ")}]
|
|
21378
|
+
`;
|
|
21379
|
+
}
|
|
21380
|
+
let result = "\n";
|
|
21381
|
+
for (const item of obj) {
|
|
21382
|
+
if (typeof item === "object" && item !== null && !Array.isArray(item)) {
|
|
21383
|
+
const entries = Object.entries(item);
|
|
21384
|
+
result += `${pad}- ${entries[0][0]}: ${serializeYAML(entries[0][1], 0).trim()}
|
|
21385
|
+
`;
|
|
21386
|
+
for (let i = 1; i < entries.length; i++) {
|
|
21387
|
+
result += `${pad} ${entries[i][0]}: ${serializeYAML(entries[i][1], indent + 2).trim()}
|
|
21388
|
+
`;
|
|
21389
|
+
}
|
|
21390
|
+
} else {
|
|
21391
|
+
result += `${pad}- ${serializeYAML(item, indent + 1).trim()}
|
|
21392
|
+
`;
|
|
21393
|
+
}
|
|
21394
|
+
}
|
|
21395
|
+
return result;
|
|
21396
|
+
}
|
|
21397
|
+
if (typeof obj === "object") {
|
|
21398
|
+
const entries = Object.entries(obj);
|
|
21399
|
+
if (entries.length === 0) return "{}\n";
|
|
21400
|
+
let result = "\n";
|
|
21401
|
+
for (const [key, value] of entries) {
|
|
21402
|
+
if (typeof value === "object" && value !== null) {
|
|
21403
|
+
result += `${pad}${key}:${serializeYAML(value, indent + 1)}`;
|
|
21404
|
+
} else {
|
|
21405
|
+
result += `${pad}${key}: ${serializeYAML(value, indent).trim()}
|
|
21406
|
+
`;
|
|
21407
|
+
}
|
|
21408
|
+
}
|
|
21409
|
+
return result;
|
|
21410
|
+
}
|
|
21411
|
+
return `${obj}
|
|
21412
|
+
`;
|
|
21413
|
+
}
|
|
21414
|
+
var init_renderer = __esm({
|
|
21415
|
+
"src/radiant/core/renderer.ts"() {
|
|
21416
|
+
"use strict";
|
|
21417
|
+
init_types();
|
|
21418
|
+
init_scopes();
|
|
21419
|
+
}
|
|
21420
|
+
});
|
|
21421
|
+
|
|
21422
|
+
// src/radiant/commands/emergent.ts
|
|
21423
|
+
async function emergent(input) {
|
|
21424
|
+
const lens = resolveLens2(input.lensId);
|
|
21425
|
+
const windowDays = input.windowDays ?? 14;
|
|
21426
|
+
let statedIntent;
|
|
21427
|
+
let exocortexContext;
|
|
21428
|
+
if (input.exocortexPath) {
|
|
21429
|
+
exocortexContext = readExocortex(input.exocortexPath);
|
|
21430
|
+
const formatted = formatExocortexForPrompt(exocortexContext);
|
|
21431
|
+
if (formatted) statedIntent = formatted;
|
|
21432
|
+
}
|
|
21433
|
+
const events = await fetchGitHubActivity(input.scope, input.githubToken, {
|
|
21434
|
+
windowDays
|
|
21435
|
+
});
|
|
21436
|
+
const classified = classifyEvents(events);
|
|
21437
|
+
const signals = extractSignals(classified);
|
|
21438
|
+
const scores = computeScores(signals, input.worldmodelContent !== "");
|
|
21439
|
+
const { patterns, meaning, move } = await interpretPatterns({
|
|
21440
|
+
signals,
|
|
21441
|
+
events: classified,
|
|
21442
|
+
worldmodelContent: input.worldmodelContent,
|
|
21443
|
+
lens,
|
|
21444
|
+
ai: input.ai,
|
|
21445
|
+
canonicalPatterns: input.canonicalPatterns,
|
|
21446
|
+
statedIntent
|
|
21447
|
+
});
|
|
21448
|
+
const rewrittenPatterns = patterns.map((p) => lens.rewrite(p));
|
|
21449
|
+
const allDescriptions = rewrittenPatterns.map((p) => p.description).join("\n");
|
|
21450
|
+
const voiceViolations = checkForbiddenPhrases(lens, allDescriptions);
|
|
21451
|
+
const rendered = render({
|
|
21452
|
+
scope: input.scope,
|
|
21453
|
+
windowDays,
|
|
21454
|
+
eventCount: events.length,
|
|
21455
|
+
signals,
|
|
21456
|
+
patterns: rewrittenPatterns,
|
|
21457
|
+
scores,
|
|
21458
|
+
lens,
|
|
21459
|
+
meaning: meaning || void 0,
|
|
21460
|
+
move: move || void 0
|
|
21461
|
+
});
|
|
21462
|
+
return {
|
|
21463
|
+
text: rendered.text,
|
|
21464
|
+
frontmatter: rendered.frontmatter,
|
|
21465
|
+
voiceViolations,
|
|
21466
|
+
voiceClean: voiceViolations.length === 0,
|
|
21467
|
+
signals,
|
|
21468
|
+
scores,
|
|
21469
|
+
eventCount: events.length
|
|
21470
|
+
};
|
|
21471
|
+
}
|
|
21472
|
+
function computeScores(signals, worldmodelLoaded) {
|
|
21473
|
+
const gate = DEFAULT_EVIDENCE_GATE;
|
|
21474
|
+
const lifeSignals = signals.filter((s) => s.domain === "life");
|
|
21475
|
+
const A_L = scoreLife(
|
|
21476
|
+
{ dimensions: lifeSignals.map(signalToDimension) },
|
|
21477
|
+
gate
|
|
21478
|
+
);
|
|
21479
|
+
const cyberSignals = signals.filter((s) => s.domain === "cyber");
|
|
21480
|
+
const A_C = scoreCyber(
|
|
21481
|
+
{ dimensions: cyberSignals.map(signalToDimension) },
|
|
21482
|
+
gate
|
|
21483
|
+
);
|
|
21484
|
+
const jointSignals = signals.filter((s) => s.domain === "joint");
|
|
21485
|
+
const A_N = scoreNeuroVerse(
|
|
21486
|
+
jointSignals.map(signalToBridging),
|
|
21487
|
+
worldmodelLoaded,
|
|
21488
|
+
gate
|
|
21489
|
+
);
|
|
21490
|
+
const R = scoreComposite(A_L, A_C, A_N);
|
|
21491
|
+
return { A_L, A_C, A_N, R };
|
|
21492
|
+
}
|
|
21493
|
+
function signalToDimension(s) {
|
|
21494
|
+
return {
|
|
21495
|
+
id: s.id,
|
|
21496
|
+
score: s.score,
|
|
21497
|
+
eventCount: s.eventCount,
|
|
21498
|
+
confidence: s.confidence
|
|
21499
|
+
};
|
|
21500
|
+
}
|
|
21501
|
+
function signalToBridging(s) {
|
|
21502
|
+
return {
|
|
21503
|
+
component: "ALIGN",
|
|
21504
|
+
// Proxy: joint signals → ALIGN component
|
|
21505
|
+
score: s.score,
|
|
21506
|
+
eventCount: s.eventCount,
|
|
21507
|
+
confidence: s.confidence
|
|
21508
|
+
};
|
|
21509
|
+
}
|
|
21510
|
+
function resolveLens2(id) {
|
|
21511
|
+
const lens = getLens2(id);
|
|
21512
|
+
if (!lens) {
|
|
21513
|
+
throw new Error(
|
|
21514
|
+
`Lens "${id}" not found. Check the id or register the lens.`
|
|
21515
|
+
);
|
|
21516
|
+
}
|
|
21517
|
+
return lens;
|
|
21518
|
+
}
|
|
21519
|
+
var init_emergent = __esm({
|
|
21520
|
+
"src/radiant/commands/emergent.ts"() {
|
|
21521
|
+
"use strict";
|
|
21522
|
+
init_lenses();
|
|
21523
|
+
init_github2();
|
|
21524
|
+
init_exocortex();
|
|
21525
|
+
init_signals();
|
|
21526
|
+
init_math();
|
|
21527
|
+
init_patterns();
|
|
21528
|
+
init_renderer();
|
|
21529
|
+
init_voice_check();
|
|
21530
|
+
init_types();
|
|
21531
|
+
}
|
|
21532
|
+
});
|
|
21533
|
+
|
|
21534
|
+
// src/radiant/core/ai.ts
|
|
21535
|
+
function createAnthropicAI(apiKey, model = "claude-sonnet-4-20250514", maxTokens = 4096) {
|
|
21536
|
+
return {
|
|
21537
|
+
async complete(systemPrompt, userQuery) {
|
|
21538
|
+
const res = await fetch("https://api.anthropic.com/v1/messages", {
|
|
21539
|
+
method: "POST",
|
|
21540
|
+
headers: {
|
|
21541
|
+
"x-api-key": apiKey,
|
|
21542
|
+
"anthropic-version": "2023-06-01",
|
|
21543
|
+
"content-type": "application/json"
|
|
21544
|
+
},
|
|
21545
|
+
body: JSON.stringify({
|
|
21546
|
+
model,
|
|
21547
|
+
max_tokens: maxTokens,
|
|
21548
|
+
system: systemPrompt,
|
|
21549
|
+
messages: [{ role: "user", content: userQuery }]
|
|
21550
|
+
})
|
|
21551
|
+
});
|
|
21552
|
+
if (!res.ok) {
|
|
21553
|
+
const body = await res.text();
|
|
21554
|
+
throw new Error(
|
|
21555
|
+
`Anthropic API error ${res.status}: ${body.slice(0, 500)}`
|
|
21556
|
+
);
|
|
21557
|
+
}
|
|
21558
|
+
const data = await res.json();
|
|
21559
|
+
const text = data.content?.filter((c) => c.type === "text").map((c) => c.text ?? "").join("");
|
|
21560
|
+
if (!text) {
|
|
21561
|
+
throw new Error("Anthropic returned no text content");
|
|
21562
|
+
}
|
|
21563
|
+
return text;
|
|
21564
|
+
}
|
|
21565
|
+
};
|
|
21566
|
+
}
|
|
21567
|
+
var init_ai = __esm({
|
|
21568
|
+
"src/radiant/core/ai.ts"() {
|
|
21569
|
+
"use strict";
|
|
21570
|
+
}
|
|
21571
|
+
});
|
|
21572
|
+
|
|
21573
|
+
// src/cli/radiant.ts
|
|
21574
|
+
var radiant_exports = {};
|
|
21575
|
+
__export(radiant_exports, {
|
|
21576
|
+
main: () => main34
|
|
21577
|
+
});
|
|
21578
|
+
function parseArgs27(argv) {
|
|
21579
|
+
const result = {
|
|
21580
|
+
subcommand: void 0,
|
|
21581
|
+
lens: void 0,
|
|
21582
|
+
worlds: void 0,
|
|
21583
|
+
query: void 0,
|
|
21584
|
+
model: void 0,
|
|
21585
|
+
exocortex: void 0,
|
|
21586
|
+
json: false,
|
|
21587
|
+
help: false,
|
|
21588
|
+
rest: []
|
|
21589
|
+
};
|
|
21590
|
+
let i = 0;
|
|
21591
|
+
if (argv.length > 0 && !argv[0].startsWith("-")) {
|
|
21592
|
+
result.subcommand = argv[0];
|
|
21593
|
+
i = 1;
|
|
21594
|
+
}
|
|
21595
|
+
while (i < argv.length) {
|
|
21596
|
+
const arg = argv[i];
|
|
21597
|
+
switch (arg) {
|
|
21598
|
+
case "--lens":
|
|
21599
|
+
result.lens = argv[++i];
|
|
21600
|
+
break;
|
|
21601
|
+
case "--worlds":
|
|
21602
|
+
result.worlds = argv[++i];
|
|
21603
|
+
break;
|
|
21604
|
+
case "--query":
|
|
21605
|
+
result.query = argv[++i];
|
|
21606
|
+
break;
|
|
21607
|
+
case "--model":
|
|
21608
|
+
result.model = argv[++i];
|
|
21609
|
+
break;
|
|
21610
|
+
case "--exocortex":
|
|
21611
|
+
result.exocortex = argv[++i];
|
|
21612
|
+
break;
|
|
21613
|
+
case "--json":
|
|
21614
|
+
result.json = true;
|
|
21615
|
+
break;
|
|
21616
|
+
case "--help":
|
|
21617
|
+
case "-h":
|
|
21618
|
+
result.help = true;
|
|
21619
|
+
break;
|
|
21620
|
+
default:
|
|
21621
|
+
result.rest.push(arg);
|
|
21622
|
+
break;
|
|
21623
|
+
}
|
|
21624
|
+
i++;
|
|
21625
|
+
}
|
|
21626
|
+
return result;
|
|
21627
|
+
}
|
|
21628
|
+
function loadWorldmodelContent(worldsPath) {
|
|
21629
|
+
const resolved = (0, import_path14.resolve)(worldsPath);
|
|
21630
|
+
if (!(0, import_fs13.existsSync)(resolved)) {
|
|
21631
|
+
throw new Error(`Worlds path not found: ${resolved}`);
|
|
21632
|
+
}
|
|
21633
|
+
const stat = (0, import_fs13.statSync)(resolved);
|
|
21634
|
+
if (stat.isFile()) {
|
|
21635
|
+
return (0, import_fs13.readFileSync)(resolved, "utf-8");
|
|
21636
|
+
}
|
|
21637
|
+
if (stat.isDirectory()) {
|
|
21638
|
+
const files = (0, import_fs13.readdirSync)(resolved).filter(
|
|
21639
|
+
(f) => (0, import_path14.extname)(f) === ".md" && (f.endsWith(".worldmodel.md") || f.endsWith(".nv-world.md"))
|
|
21640
|
+
).sort();
|
|
21641
|
+
if (files.length === 0) {
|
|
21642
|
+
throw new Error(
|
|
21643
|
+
`No .worldmodel.md or .nv-world.md files found in ${resolved}`
|
|
21644
|
+
);
|
|
21645
|
+
}
|
|
21646
|
+
return files.map((f) => {
|
|
21647
|
+
const content = (0, import_fs13.readFileSync)((0, import_path14.join)(resolved, f), "utf-8");
|
|
21648
|
+
return `<!-- worldmodel: ${f} -->
|
|
21649
|
+
${content}`;
|
|
21650
|
+
}).join("\n\n---\n\n");
|
|
21651
|
+
}
|
|
21652
|
+
throw new Error(`Worlds path is neither a file nor a directory: ${resolved}`);
|
|
21653
|
+
}
|
|
21654
|
+
async function cmdThink(args) {
|
|
21655
|
+
const lensId = args.lens ?? process.env.RADIANT_LENS;
|
|
21656
|
+
if (!lensId) {
|
|
21657
|
+
process.stderr.write(
|
|
21658
|
+
`${RED2}Error:${RESET3} --lens <id> or RADIANT_LENS required.
|
|
21659
|
+
${DIM3}Available lenses: ${listLenses().join(", ")}${RESET3}
|
|
21660
|
+
`
|
|
21661
|
+
);
|
|
21662
|
+
process.exit(1);
|
|
21663
|
+
}
|
|
21664
|
+
const worldsPath = args.worlds ?? process.env.RADIANT_WORLDS;
|
|
21665
|
+
if (!worldsPath) {
|
|
21666
|
+
process.stderr.write(
|
|
21667
|
+
`${RED2}Error:${RESET3} --worlds <dir> or RADIANT_WORLDS required.
|
|
21668
|
+
`
|
|
21669
|
+
);
|
|
21670
|
+
process.exit(1);
|
|
21671
|
+
}
|
|
21672
|
+
const apiKey = process.env.ANTHROPIC_API_KEY;
|
|
21673
|
+
if (!apiKey) {
|
|
21674
|
+
process.stderr.write(
|
|
21675
|
+
`${RED2}Error:${RESET3} ANTHROPIC_API_KEY environment variable not set.
|
|
21676
|
+
${DIM3}Set it to your Anthropic API key to use Radiant's AI features.${RESET3}
|
|
21677
|
+
`
|
|
21678
|
+
);
|
|
21679
|
+
process.exit(1);
|
|
21680
|
+
}
|
|
21681
|
+
let query = args.query;
|
|
21682
|
+
if (!query && args.rest.length > 0) {
|
|
21683
|
+
query = args.rest.join(" ");
|
|
21684
|
+
}
|
|
21685
|
+
if (!query && !process.stdin.isTTY) {
|
|
21686
|
+
query = (0, import_fs13.readFileSync)(0, "utf-8").trim();
|
|
21687
|
+
}
|
|
21688
|
+
if (!query) {
|
|
21689
|
+
process.stderr.write(
|
|
21690
|
+
`${RED2}Error:${RESET3} No query provided.
|
|
21691
|
+
${DIM3}Use --query "...", pass as trailing args, or pipe via stdin.${RESET3}
|
|
21692
|
+
`
|
|
21693
|
+
);
|
|
21694
|
+
process.exit(1);
|
|
21695
|
+
}
|
|
21696
|
+
const worldmodelContent = loadWorldmodelContent(worldsPath);
|
|
21697
|
+
const model = args.model ?? process.env.RADIANT_MODEL;
|
|
21698
|
+
const ai = createAnthropicAI(apiKey, model || void 0);
|
|
21699
|
+
process.stderr.write(
|
|
21700
|
+
`${DIM3}Worlds: ${worldsPath}${RESET3}
|
|
21701
|
+
${DIM3}Lens: ${lensId}${RESET3}
|
|
21702
|
+
${DIM3}Model: ${model ?? "claude-sonnet-4-20250514 (default)"}${RESET3}
|
|
21703
|
+
|
|
21704
|
+
`
|
|
21705
|
+
);
|
|
21706
|
+
const result = await think({
|
|
21707
|
+
worldmodelContent,
|
|
21708
|
+
lensId,
|
|
21709
|
+
query,
|
|
21710
|
+
ai
|
|
21711
|
+
});
|
|
21712
|
+
if (!result.voiceClean) {
|
|
21713
|
+
process.stderr.write(
|
|
21714
|
+
`${YELLOW3}Voice violations detected (${result.voiceViolations.length}):${RESET3}
|
|
21715
|
+
`
|
|
21716
|
+
);
|
|
21717
|
+
for (const v of result.voiceViolations) {
|
|
21718
|
+
process.stderr.write(
|
|
21719
|
+
` ${YELLOW3}\u26A0${RESET3} "${v.phrase}" at offset ${v.offset}
|
|
21720
|
+
`
|
|
21721
|
+
);
|
|
21722
|
+
}
|
|
21723
|
+
process.stderr.write("\n");
|
|
21724
|
+
}
|
|
21725
|
+
if (args.json) {
|
|
21726
|
+
process.stdout.write(
|
|
21727
|
+
JSON.stringify(
|
|
21728
|
+
{
|
|
21729
|
+
response: result.response,
|
|
21730
|
+
lens: result.lens,
|
|
21731
|
+
voiceClean: result.voiceClean,
|
|
21732
|
+
voiceViolations: result.voiceViolations
|
|
21733
|
+
},
|
|
21734
|
+
null,
|
|
21735
|
+
2
|
|
21736
|
+
) + "\n"
|
|
21737
|
+
);
|
|
21738
|
+
} else {
|
|
21739
|
+
process.stdout.write(result.response + "\n");
|
|
21740
|
+
}
|
|
21741
|
+
if (!result.voiceClean) {
|
|
21742
|
+
process.exit(2);
|
|
21743
|
+
}
|
|
21744
|
+
}
|
|
21745
|
+
async function cmdEmergent(args) {
|
|
21746
|
+
const scopeStr = args.rest[0];
|
|
21747
|
+
if (!scopeStr) {
|
|
21748
|
+
process.stderr.write(
|
|
21749
|
+
`${RED2}Error:${RESET3} Scope required. Usage: neuroverse radiant emergent <owner/repo>
|
|
21750
|
+
`
|
|
21751
|
+
);
|
|
21752
|
+
process.exit(1);
|
|
21753
|
+
}
|
|
21754
|
+
const scope = parseRepoScope(scopeStr);
|
|
21755
|
+
const lensId = args.lens ?? process.env.RADIANT_LENS;
|
|
21756
|
+
if (!lensId) {
|
|
21757
|
+
process.stderr.write(
|
|
21758
|
+
`${RED2}Error:${RESET3} --lens <id> or RADIANT_LENS required.
|
|
21759
|
+
${DIM3}Available lenses: ${listLenses().join(", ")}${RESET3}
|
|
21760
|
+
`
|
|
21761
|
+
);
|
|
21762
|
+
process.exit(1);
|
|
21763
|
+
}
|
|
21764
|
+
const worldsPath = args.worlds ?? process.env.RADIANT_WORLDS;
|
|
21765
|
+
if (!worldsPath) {
|
|
21766
|
+
process.stderr.write(
|
|
21767
|
+
`${RED2}Error:${RESET3} --worlds <dir> or RADIANT_WORLDS required.
|
|
21768
|
+
`
|
|
21769
|
+
);
|
|
21770
|
+
process.exit(1);
|
|
21771
|
+
}
|
|
21772
|
+
const anthropicKey = process.env.ANTHROPIC_API_KEY;
|
|
21773
|
+
if (!anthropicKey) {
|
|
21774
|
+
process.stderr.write(
|
|
21775
|
+
`${RED2}Error:${RESET3} ANTHROPIC_API_KEY environment variable not set.
|
|
21776
|
+
`
|
|
21777
|
+
);
|
|
21778
|
+
process.exit(1);
|
|
21779
|
+
}
|
|
21780
|
+
const githubToken = process.env.GITHUB_TOKEN;
|
|
21781
|
+
if (!githubToken) {
|
|
21782
|
+
process.stderr.write(
|
|
21783
|
+
`${RED2}Error:${RESET3} GITHUB_TOKEN environment variable not set.
|
|
21784
|
+
${DIM3}Set it to a GitHub PAT with repo read access.${RESET3}
|
|
21785
|
+
`
|
|
21786
|
+
);
|
|
21787
|
+
process.exit(1);
|
|
21788
|
+
}
|
|
21789
|
+
const worldmodelContent = loadWorldmodelContent(worldsPath);
|
|
21790
|
+
const model = args.model ?? process.env.RADIANT_MODEL;
|
|
21791
|
+
const ai = createAnthropicAI(anthropicKey, model || void 0);
|
|
21792
|
+
const exocortexPath = args.exocortex ?? process.env.RADIANT_EXOCORTEX;
|
|
21793
|
+
let exocortexStatus = "not loaded";
|
|
21794
|
+
if (exocortexPath) {
|
|
21795
|
+
const ctx = readExocortex(exocortexPath);
|
|
21796
|
+
exocortexStatus = summarizeExocortex(ctx);
|
|
21797
|
+
}
|
|
21798
|
+
process.stderr.write(
|
|
21799
|
+
`${DIM3}Scope: ${scope.owner}/${scope.repo}${RESET3}
|
|
21800
|
+
${DIM3}Lens: ${lensId}${RESET3}
|
|
21801
|
+
${DIM3}Model: ${model ?? "claude-sonnet-4-20250514 (default)"}${RESET3}
|
|
21802
|
+
${DIM3}ExoCortex: ${exocortexStatus}${RESET3}
|
|
21803
|
+
${DIM3}Fetching activity...${RESET3}
|
|
21804
|
+
|
|
21805
|
+
`
|
|
21806
|
+
);
|
|
21807
|
+
const result = await emergent({
|
|
21808
|
+
scope,
|
|
21809
|
+
githubToken,
|
|
21810
|
+
worldmodelContent,
|
|
21811
|
+
lensId,
|
|
21812
|
+
ai,
|
|
21813
|
+
windowDays: 14,
|
|
21814
|
+
exocortexPath: exocortexPath || void 0
|
|
21815
|
+
});
|
|
21816
|
+
if (!result.voiceClean) {
|
|
21817
|
+
process.stderr.write(
|
|
21818
|
+
`${YELLOW3}Voice violations (${result.voiceViolations.length}):${RESET3}
|
|
21819
|
+
`
|
|
21820
|
+
);
|
|
21821
|
+
for (const v of result.voiceViolations) {
|
|
21822
|
+
process.stderr.write(
|
|
21823
|
+
` ${YELLOW3}\u26A0${RESET3} "${v.phrase}" at offset ${v.offset}
|
|
21824
|
+
`
|
|
21825
|
+
);
|
|
21826
|
+
}
|
|
21827
|
+
process.stderr.write("\n");
|
|
21828
|
+
}
|
|
21829
|
+
if (args.json) {
|
|
21830
|
+
process.stdout.write(
|
|
21831
|
+
JSON.stringify(
|
|
21832
|
+
{
|
|
21833
|
+
text: result.text,
|
|
21834
|
+
frontmatter: result.frontmatter,
|
|
21835
|
+
scores: result.scores,
|
|
21836
|
+
eventCount: result.eventCount,
|
|
21837
|
+
voiceClean: result.voiceClean
|
|
21838
|
+
},
|
|
21839
|
+
null,
|
|
21840
|
+
2
|
|
21841
|
+
) + "\n"
|
|
21842
|
+
);
|
|
21843
|
+
} else {
|
|
21844
|
+
process.stdout.write(result.text + "\n");
|
|
21845
|
+
}
|
|
21846
|
+
}
|
|
21847
|
+
async function cmdLenses(args) {
|
|
21848
|
+
const subSub = args.rest[0];
|
|
21849
|
+
if (!subSub || subSub === "list") {
|
|
21850
|
+
const ids = listLenses();
|
|
21851
|
+
if (ids.length === 0) {
|
|
21852
|
+
process.stdout.write("No lenses registered.\n");
|
|
21853
|
+
} else {
|
|
21854
|
+
for (const id of ids) {
|
|
21855
|
+
process.stdout.write(`${id}
|
|
21856
|
+
`);
|
|
21857
|
+
}
|
|
21858
|
+
}
|
|
21859
|
+
return;
|
|
21860
|
+
}
|
|
21861
|
+
if (subSub === "describe") {
|
|
21862
|
+
const { getLens: getLens3 } = await Promise.resolve().then(() => (init_lenses(), lenses_exports));
|
|
21863
|
+
const id = args.rest[1];
|
|
21864
|
+
if (!id) {
|
|
21865
|
+
process.stderr.write(`${RED2}Error:${RESET3} Lens id required.
|
|
21866
|
+
`);
|
|
21867
|
+
process.exit(1);
|
|
21868
|
+
}
|
|
21869
|
+
const lens = getLens3(id);
|
|
21870
|
+
if (!lens) {
|
|
21871
|
+
process.stderr.write(
|
|
21872
|
+
`${RED2}Error:${RESET3} Lens "${id}" not found.
|
|
21873
|
+
${DIM3}Available: ${listLenses().join(", ")}${RESET3}
|
|
21874
|
+
`
|
|
21875
|
+
);
|
|
21876
|
+
process.exit(1);
|
|
21877
|
+
}
|
|
21878
|
+
process.stdout.write(`${BOLD3}${lens.name}${RESET3}
|
|
21879
|
+
`);
|
|
21880
|
+
process.stdout.write(`${lens.description}
|
|
21881
|
+
|
|
21882
|
+
`);
|
|
21883
|
+
process.stdout.write(
|
|
21884
|
+
`${BOLD3}Domains:${RESET3} ${lens.primary_frame.domains.join(", ")}
|
|
21885
|
+
`
|
|
21886
|
+
);
|
|
21887
|
+
process.stdout.write(
|
|
21888
|
+
`${BOLD3}Overlaps:${RESET3} ${lens.primary_frame.overlaps.map((o) => o.emergent_state).join(", ")}
|
|
21889
|
+
`
|
|
21890
|
+
);
|
|
21891
|
+
process.stdout.write(
|
|
21892
|
+
`${BOLD3}Center:${RESET3} ${lens.primary_frame.center_identity}
|
|
21893
|
+
`
|
|
21894
|
+
);
|
|
21895
|
+
process.stdout.write(
|
|
21896
|
+
`${BOLD3}Forbidden phrases:${RESET3} ${lens.forbidden_phrases.length}
|
|
21897
|
+
`
|
|
21898
|
+
);
|
|
21899
|
+
process.stdout.write(
|
|
21900
|
+
`${BOLD3}Vocabulary terms:${RESET3} ${lens.vocabulary.proper_nouns.length} proper nouns, ${Object.keys(lens.vocabulary.preferred).length} substitutions
|
|
21901
|
+
`
|
|
21902
|
+
);
|
|
21903
|
+
process.stdout.write(
|
|
21904
|
+
`${BOLD3}Exemplars:${RESET3} ${lens.exemplar_refs.length}
|
|
21905
|
+
`
|
|
21906
|
+
);
|
|
21907
|
+
return;
|
|
21908
|
+
}
|
|
21909
|
+
process.stderr.write(
|
|
21910
|
+
`${RED2}Error:${RESET3} Unknown lenses subcommand "${subSub}".
|
|
21911
|
+
${DIM3}Use: lenses list | lenses describe <id>${RESET3}
|
|
21912
|
+
`
|
|
21913
|
+
);
|
|
21914
|
+
process.exit(1);
|
|
21915
|
+
}
|
|
21916
|
+
async function main34(argv) {
|
|
21917
|
+
const args = parseArgs27(argv);
|
|
21918
|
+
if (args.help || !args.subcommand) {
|
|
21919
|
+
process.stdout.write(USAGE10 + "\n");
|
|
21920
|
+
return;
|
|
21921
|
+
}
|
|
21922
|
+
switch (args.subcommand) {
|
|
21923
|
+
case "think":
|
|
21924
|
+
return cmdThink(args);
|
|
21925
|
+
case "lenses":
|
|
21926
|
+
return cmdLenses(args);
|
|
21927
|
+
case "emergent":
|
|
21928
|
+
return cmdEmergent(args);
|
|
21929
|
+
case "decision":
|
|
21930
|
+
case "signals":
|
|
21931
|
+
case "drift":
|
|
21932
|
+
case "evolve":
|
|
21933
|
+
process.stderr.write(
|
|
21934
|
+
`${DIM3}neuroverse radiant ${args.subcommand} is not yet implemented.${RESET3}
|
|
21935
|
+
`
|
|
21936
|
+
);
|
|
21937
|
+
process.exit(1);
|
|
21938
|
+
break;
|
|
21939
|
+
default:
|
|
21940
|
+
process.stderr.write(
|
|
21941
|
+
`${RED2}Unknown radiant subcommand: "${args.subcommand}"${RESET3}
|
|
21942
|
+
|
|
21943
|
+
`
|
|
21944
|
+
);
|
|
21945
|
+
process.stdout.write(USAGE10 + "\n");
|
|
21946
|
+
process.exit(1);
|
|
21947
|
+
}
|
|
21948
|
+
}
|
|
21949
|
+
var import_fs13, import_path14, RED2, DIM3, BOLD3, YELLOW3, RESET3, USAGE10;
|
|
21950
|
+
var init_radiant = __esm({
|
|
21951
|
+
"src/cli/radiant.ts"() {
|
|
21952
|
+
"use strict";
|
|
21953
|
+
import_fs13 = require("fs");
|
|
21954
|
+
import_path14 = require("path");
|
|
21955
|
+
init_think();
|
|
21956
|
+
init_emergent();
|
|
21957
|
+
init_ai();
|
|
21958
|
+
init_scopes();
|
|
21959
|
+
init_exocortex();
|
|
21960
|
+
init_lenses();
|
|
21961
|
+
RED2 = "\x1B[31m";
|
|
21962
|
+
DIM3 = "\x1B[2m";
|
|
21963
|
+
BOLD3 = "\x1B[1m";
|
|
21964
|
+
YELLOW3 = "\x1B[33m";
|
|
21965
|
+
RESET3 = "\x1B[0m";
|
|
21966
|
+
USAGE10 = `
|
|
21967
|
+
${BOLD3}neuroverse radiant${RESET3} \u2014 behavioral intelligence for collaboration systems
|
|
21968
|
+
|
|
21969
|
+
${BOLD3}Stage A (voice layer):${RESET3}
|
|
21970
|
+
think Send a query through the worldmodel + lens \u2192 AI-framed response
|
|
21971
|
+
|
|
21972
|
+
${BOLD3}Stage B (behavioral analysis, coming soon):${RESET3}
|
|
21973
|
+
emergent Pattern read on recent activity
|
|
21974
|
+
decision Evaluate a specific artifact against the worldmodel
|
|
21975
|
+
signals Extract signal matrix (debug)
|
|
21976
|
+
lenses List or describe available rendering lenses
|
|
21977
|
+
|
|
21978
|
+
${BOLD3}Usage:${RESET3}
|
|
21979
|
+
neuroverse radiant think --lens auki-builder --worlds ./worlds/ --query "What is our biggest risk?"
|
|
21980
|
+
neuroverse radiant think --lens auki-builder --worlds ./worlds/ < prompt.txt
|
|
21981
|
+
neuroverse radiant emergent aukiverse/posemesh --lens auki-builder --worlds ./worlds/
|
|
21982
|
+
neuroverse radiant emergent aukiverse/posemesh --lens auki-builder --worlds ./worlds/ --exocortex ~/exocortex/
|
|
21983
|
+
neuroverse radiant lenses list
|
|
21984
|
+
neuroverse radiant lenses describe auki-builder
|
|
21985
|
+
|
|
21986
|
+
${BOLD3}Environment:${RESET3}
|
|
21987
|
+
ANTHROPIC_API_KEY Required for AI commands (think, emergent, decision)
|
|
21988
|
+
RADIANT_WORLDS Default worlds directory (overridden by --worlds)
|
|
21989
|
+
RADIANT_LENS Default lens id (overridden by --lens)
|
|
21990
|
+
RADIANT_MODEL AI model override (default: claude-sonnet-4-20250514)
|
|
21991
|
+
RADIANT_EXOCORTEX Default exocortex directory (overridden by --exocortex)
|
|
21992
|
+
`.trim();
|
|
21993
|
+
}
|
|
21994
|
+
});
|
|
21995
|
+
|
|
19870
21996
|
// src/cli/neuroverse.ts
|
|
19871
|
-
var
|
|
19872
|
-
neuroverse \u2014
|
|
21997
|
+
var USAGE11 = `
|
|
21998
|
+
neuroverse \u2014 Behavioral governance for AI systems.
|
|
19873
21999
|
|
|
19874
|
-
|
|
22000
|
+
Behavioral modeling:
|
|
22001
|
+
worldmodel Build behavioral models (init, validate, build, explain, infer)
|
|
22002
|
+
radiant Behavioral intelligence for collaboration (think, emergent, lenses)
|
|
22003
|
+
lens Manage behavioral lenses (list, preview, compile, compare, add)
|
|
22004
|
+
|
|
22005
|
+
Runtime governance:
|
|
22006
|
+
guard Evaluate events against a world (stdin \u2192 stdout)
|
|
22007
|
+
plan Plan enforcement (compile, check, status, advance, derive)
|
|
22008
|
+
run Governed runtime (pipe mode or interactive chat)
|
|
22009
|
+
mcp MCP governance server (for Claude, Cursor, etc.)
|
|
22010
|
+
|
|
22011
|
+
World management:
|
|
22012
|
+
world Manage worlds (status, diff, snapshot, rollback, list)
|
|
19875
22013
|
add Add a guard, rule, or invariant to a world
|
|
19876
|
-
build Build a world from markdown (derive + compile
|
|
19877
|
-
explain Human-readable summary of a compiled world
|
|
19878
|
-
simulate Step-by-step state evolution
|
|
19879
|
-
improve Actionable suggestions for strengthening a world
|
|
19880
|
-
init Scaffold a new .nv-world.md template
|
|
19881
|
-
init-world Generate a governed world from a template (e.g., autoresearch)
|
|
19882
|
-
infer-world Scan a repo and infer a governance world from its structure
|
|
22014
|
+
build Build a world from markdown (derive + compile)
|
|
19883
22015
|
validate Static analysis on world files
|
|
19884
|
-
|
|
22016
|
+
explain Human-readable summary of a compiled world
|
|
22017
|
+
|
|
22018
|
+
Testing:
|
|
19885
22019
|
test Run guard simulation suite against a world
|
|
19886
22020
|
redteam Adversarial containment testing (agent escape detection)
|
|
19887
|
-
demo Interactive governance demo (flow viz + simulation)
|
|
19888
22021
|
doctor Environment sanity check
|
|
19889
|
-
|
|
19890
|
-
|
|
19891
|
-
run Governed runtime (pipe mode or interactive chat)
|
|
19892
|
-
mcp MCP governance server (for Claude, Cursor, etc.)
|
|
19893
|
-
worlds List available worlds (alias for world list)
|
|
19894
|
-
trace Runtime action audit log
|
|
19895
|
-
impact Counterfactual governance impact report
|
|
19896
|
-
decision-flow Intent \u2192 Rule \u2192 Outcome visualization (behavioral governance)
|
|
19897
|
-
equity-penalties Fortune 500 equity PENALIZE/REWARD simulation
|
|
19898
|
-
world World management (status, diff, snapshot, rollback)
|
|
22022
|
+
|
|
22023
|
+
Administration:
|
|
19899
22024
|
keygen Generate Ed25519 signing keypair
|
|
19900
|
-
sign Sign a world artifact
|
|
22025
|
+
sign Sign a world artifact
|
|
19901
22026
|
verify Verify a signed world artifact
|
|
19902
22027
|
migrate Migrate world schema between versions
|
|
19903
|
-
derive AI-assisted synthesis of .nv-world.md from markdown
|
|
19904
|
-
bootstrap Compile .nv-world.md \u2192 world JSON files
|
|
19905
22028
|
configure-ai Configure AI provider credentials
|
|
19906
|
-
configure-world Interactive wizard: define your system in plain language
|
|
19907
|
-
lens Manage behavioral lenses (list, preview, compile, compare, add)
|
|
19908
|
-
worldmodel Behavioral world model builder (init, validate, build, explain)
|
|
19909
22029
|
|
|
19910
|
-
|
|
19911
|
-
neuroverse
|
|
19912
|
-
neuroverse
|
|
19913
|
-
neuroverse
|
|
19914
|
-
|
|
19915
|
-
neuroverse
|
|
19916
|
-
|
|
19917
|
-
neuroverse simulate <world-path-or-id> [--steps N] [--set key=value] [--profile name]
|
|
19918
|
-
neuroverse improve <world-path-or-id> [--json]
|
|
19919
|
-
neuroverse init [--name "World Name"] [--output path]
|
|
19920
|
-
neuroverse init-world autoresearch [--context "topic"] [--dataset "name"] [--goal "goal"]
|
|
19921
|
-
neuroverse infer-world ./repo [--output path] [--json] [--dry-run]
|
|
19922
|
-
neuroverse validate --world <dir> [--format full|summary|findings]
|
|
19923
|
-
neuroverse guard --world <dir> [--trace] [--level basic|standard|strict]
|
|
19924
|
-
neuroverse test --world <dir> [--fuzz] [--count N]
|
|
19925
|
-
neuroverse redteam --world <dir> [--level basic|standard|strict]
|
|
19926
|
-
neuroverse demo [--world social-media] [--port 3456] [--no-browser]
|
|
19927
|
-
neuroverse doctor [--world <dir>] [--json]
|
|
19928
|
-
neuroverse playground --world <dir> [--port 4242]
|
|
19929
|
-
neuroverse trace [--log <path>] [--summary] [--filter BLOCK] [--last 20]
|
|
19930
|
-
neuroverse impact [--log <path>] [--json]
|
|
19931
|
-
neuroverse world status <path>
|
|
19932
|
-
neuroverse world diff <path1> <path2>
|
|
19933
|
-
neuroverse world snapshot <path>
|
|
19934
|
-
neuroverse world rollback <path>
|
|
19935
|
-
neuroverse derive --input <path> [--output <path>] [--dry-run]
|
|
19936
|
-
neuroverse bootstrap --input <.md> --output <dir> [--validate]
|
|
19937
|
-
neuroverse decision-flow [--log <path>] [--json]
|
|
19938
|
-
neuroverse equity-penalties --world <dir> [--agents N] [--rounds N] [--json]
|
|
19939
|
-
neuroverse configure-ai --provider <name> --model <name> --api-key <key>
|
|
19940
|
-
neuroverse configure-world [--output <dir>]
|
|
19941
|
-
neuroverse keygen [--output <dir>] [--name <name>]
|
|
19942
|
-
neuroverse sign --world <dir> [--key <path>]
|
|
19943
|
-
neuroverse verify --world <dir> [--key <path>]
|
|
19944
|
-
neuroverse migrate --world <dir> [--dry-run] [--backup]
|
|
19945
|
-
neuroverse lens list [--world <dir>] [--json]
|
|
19946
|
-
neuroverse lens preview <id> [--world <dir>]
|
|
19947
|
-
neuroverse lens compile <id,...> [--world <dir>] [--role <role>] [--json]
|
|
19948
|
-
neuroverse lens compare --input "text" --lenses stoic,coach,calm
|
|
19949
|
-
neuroverse lens add --world <dir> --name "Name" --tagline "..." [options]
|
|
19950
|
-
neuroverse worldmodel init --name "My Model"
|
|
19951
|
-
neuroverse worldmodel build ./model.worldmodel.md --output ./world/
|
|
19952
|
-
neuroverse worldmodel explain ./model.worldmodel.md
|
|
22030
|
+
Quick start:
|
|
22031
|
+
neuroverse worldmodel init --name "My Model" Create a behavioral model
|
|
22032
|
+
neuroverse worldmodel build ./model.worldmodel.md Compile it
|
|
22033
|
+
neuroverse radiant think --lens auki-builder \\
|
|
22034
|
+
--worlds ./worlds/ --query "..." Ask through the model
|
|
22035
|
+
neuroverse radiant emergent owner/repo \\
|
|
22036
|
+
--lens auki-builder --worlds ./worlds/ Behavioral read on a repo
|
|
19953
22037
|
|
|
19954
|
-
|
|
19955
|
-
neuroverse
|
|
19956
|
-
neuroverse explain inherited_silence
|
|
19957
|
-
neuroverse simulate inherited_silence --steps 5
|
|
19958
|
-
neuroverse improve inherited_silence
|
|
19959
|
-
neuroverse build ./docs/ --output ./my-world/
|
|
19960
|
-
neuroverse init --name "Customer Service Governance"
|
|
19961
|
-
neuroverse validate --world ./world/ --format summary
|
|
19962
|
-
echo '{"intent":"delete user data"}' | neuroverse guard --world ./world/ --trace
|
|
22038
|
+
Governance:
|
|
22039
|
+
echo '{"intent":"..."}' | neuroverse guard --world ./world/
|
|
19963
22040
|
neuroverse plan compile plan.md --output plan.json
|
|
19964
|
-
echo '{"intent":"write blog"}' | neuroverse plan check --plan plan.json
|
|
19965
|
-
neuroverse plan status --plan plan.json
|
|
19966
|
-
neuroverse plan advance write_blog_post --plan plan.json
|
|
19967
|
-
neuroverse plan derive plan.md --output ./derived-world/
|
|
19968
22041
|
neuroverse run --pipe --world ./world/ --plan plan.json
|
|
19969
|
-
neuroverse run --interactive --world ./world/ --provider openai
|
|
19970
22042
|
neuroverse mcp --world ./world/ --plan plan.json
|
|
19971
22043
|
neuroverse test --world ./world/ --fuzz --count 50
|
|
19972
|
-
neuroverse redteam --world ./world/ --level strict
|
|
19973
|
-
neuroverse doctor
|
|
19974
|
-
neuroverse playground --world ./world/
|
|
19975
22044
|
`.trim();
|
|
19976
|
-
async function
|
|
22045
|
+
async function main35() {
|
|
19977
22046
|
const args = process.argv.slice(2);
|
|
19978
22047
|
const command = args[0];
|
|
19979
22048
|
const subArgs = args.slice(1);
|
|
@@ -19999,6 +22068,7 @@ async function main34() {
|
|
|
19999
22068
|
return improveMain(subArgs);
|
|
20000
22069
|
}
|
|
20001
22070
|
case "init": {
|
|
22071
|
+
process.stderr.write("\x1B[2mNote: `neuroverse init` scaffolds .nv-world.md files. For behavioral models, use `neuroverse worldmodel init` instead.\x1B[0m\n");
|
|
20002
22072
|
const { main: initMain } = await Promise.resolve().then(() => (init_init(), init_exports));
|
|
20003
22073
|
return initMain(subArgs);
|
|
20004
22074
|
}
|
|
@@ -20011,6 +22081,7 @@ async function main34() {
|
|
|
20011
22081
|
return inferWorldMain(subArgs);
|
|
20012
22082
|
}
|
|
20013
22083
|
case "bootstrap": {
|
|
22084
|
+
process.stderr.write("\x1B[2mNote: `neuroverse bootstrap` compiles .nv-world.md files. For behavioral models, use `neuroverse worldmodel build` instead.\x1B[0m\n");
|
|
20014
22085
|
const { main: bootstrapMain } = await Promise.resolve().then(() => (init_bootstrap(), bootstrap_exports));
|
|
20015
22086
|
return bootstrapMain(subArgs);
|
|
20016
22087
|
}
|
|
@@ -20075,6 +22146,7 @@ async function main34() {
|
|
|
20075
22146
|
return worldMain(subArgs);
|
|
20076
22147
|
}
|
|
20077
22148
|
case "derive": {
|
|
22149
|
+
process.stderr.write("\x1B[2mNote: `neuroverse derive` is included in `neuroverse build`. Consider using `neuroverse build` for the combined derive + compile step.\x1B[0m\n");
|
|
20078
22150
|
const { main: deriveMain } = await Promise.resolve().then(() => (init_derive(), derive_exports));
|
|
20079
22151
|
return deriveMain(subArgs);
|
|
20080
22152
|
}
|
|
@@ -20107,6 +22179,7 @@ async function main34() {
|
|
|
20107
22179
|
return configureAiMain(subArgs);
|
|
20108
22180
|
}
|
|
20109
22181
|
case "configure-world": {
|
|
22182
|
+
process.stderr.write("\x1B[2mNote: For behavioral models, use `neuroverse worldmodel init` instead. `configure-world` is the interactive wizard for .nv-world.md files.\x1B[0m\n");
|
|
20110
22183
|
const { main: configureWorldMain } = await Promise.resolve().then(() => (init_configure_world(), configure_world_exports));
|
|
20111
22184
|
return configureWorldMain(subArgs);
|
|
20112
22185
|
}
|
|
@@ -20118,11 +22191,15 @@ async function main34() {
|
|
|
20118
22191
|
const { main: worldmodelMain } = await Promise.resolve().then(() => (init_worldmodel(), worldmodel_exports));
|
|
20119
22192
|
return worldmodelMain(subArgs);
|
|
20120
22193
|
}
|
|
22194
|
+
case "radiant": {
|
|
22195
|
+
const { main: radiantMain } = await Promise.resolve().then(() => (init_radiant(), radiant_exports));
|
|
22196
|
+
return radiantMain(subArgs);
|
|
22197
|
+
}
|
|
20121
22198
|
case "--help":
|
|
20122
22199
|
case "-h":
|
|
20123
22200
|
case "help":
|
|
20124
22201
|
case void 0: {
|
|
20125
|
-
process.stdout.write(
|
|
22202
|
+
process.stdout.write(USAGE11 + "\n");
|
|
20126
22203
|
process.exit(0);
|
|
20127
22204
|
break;
|
|
20128
22205
|
}
|
|
@@ -20130,12 +22207,12 @@ async function main34() {
|
|
|
20130
22207
|
process.stderr.write(`Unknown command: "${command}"
|
|
20131
22208
|
|
|
20132
22209
|
`);
|
|
20133
|
-
process.stdout.write(
|
|
22210
|
+
process.stdout.write(USAGE11 + "\n");
|
|
20134
22211
|
process.exit(1);
|
|
20135
22212
|
}
|
|
20136
22213
|
}
|
|
20137
22214
|
}
|
|
20138
|
-
|
|
22215
|
+
main35().catch((e) => {
|
|
20139
22216
|
process.stderr.write(`Fatal: ${e}
|
|
20140
22217
|
`);
|
|
20141
22218
|
process.exit(3);
|