@neuroverseos/governance 0.6.0 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2058 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __esm = (fn, res) => function __init() {
7
+ return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
8
+ };
9
+ var __export = (target, all) => {
10
+ for (var name in all)
11
+ __defProp(target, name, { get: all[name], enumerable: true });
12
+ };
13
+ var __copyProps = (to, from, except, desc) => {
14
+ if (from && typeof from === "object" || typeof from === "function") {
15
+ for (let key of __getOwnPropNames(from))
16
+ if (!__hasOwnProp.call(to, key) && key !== except)
17
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
18
+ }
19
+ return to;
20
+ };
21
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
22
+
23
+ // src/radiant/lenses/auki-builder.ts
24
+ function aukiBuilderRewrite(pattern) {
25
+ if (pattern.evidence.cited_invariant) {
26
+ return {
27
+ ...pattern,
28
+ framing: "invariant pressure",
29
+ emphasis: "worldmodel invariant cited by this observation \u2014 surface the cross-reference",
30
+ compress: true
31
+ };
32
+ }
33
+ if (pattern.type === "candidate") {
34
+ return {
35
+ ...pattern,
36
+ framing: "emergent observation (not yet in worldmodel)",
37
+ emphasis: "candidate pattern \u2014 surface the vanguard-domain analysis (which domain activated this?)",
38
+ compress: true
39
+ };
40
+ }
41
+ return {
42
+ ...pattern,
43
+ framing: "system-level consequence",
44
+ emphasis: "coordination + leverage",
45
+ compress: true
46
+ };
47
+ }
48
+ var AUKI_VANGUARD_FRAME, AUKI_VOCABULARY, AUKI_VOICE, AUKI_FORBIDDEN_PHRASES, AUKI_PREFERRED_PATTERNS, AUKI_STRATEGIC_PATTERNS, AUKI_EXEMPLARS, aukiBuilderLens;
49
+ var init_auki_builder = __esm({
50
+ "src/radiant/lenses/auki-builder.ts"() {
51
+ "use strict";
52
+ AUKI_VANGUARD_FRAME = {
53
+ domains: [
54
+ "future-foresight",
55
+ "narrative-dynamics",
56
+ "shared-prosperity"
57
+ ],
58
+ overlaps: [
59
+ {
60
+ domains: ["future-foresight", "narrative-dynamics"],
61
+ emergent_state: "Inspiration",
62
+ description: "Visionary leaders inspire action by painting a vivid picture of a better future, helping people understand how to get there together. Emerges when long-range thinking meets language that rallies."
63
+ },
64
+ {
65
+ domains: ["narrative-dynamics", "shared-prosperity"],
66
+ emergent_state: "Trust",
67
+ description: "Built through authentic storytelling and consistent delivery on promises, creating a community where contributors feel secure in their contributions. Emerges when clear intent meets fair distribution \u2014 coalitions form here."
68
+ },
69
+ {
70
+ domains: ["shared-prosperity", "future-foresight"],
71
+ emergent_state: "Hope",
72
+ description: "Propels decentralized communities toward a collective future where resources are equitably distributed and success is shared by all. Emerges when long-term infrastructure is architected for collective benefit \u2014 the DePIN / Intercognitive posture."
73
+ }
74
+ ],
75
+ center_identity: "Collective Vanguard Leader",
76
+ evaluation_questions: [
77
+ "What long-range architectural thinking is present? Systems design, scenario planning, critical thinking, ethical judgment \u2014 which of these is visible, which is weak?",
78
+ "What communication and meaning-making is happening? Storytelling, cultural sensitivity, audience engagement, persuasive writing \u2014 who is telling the story of how the pieces connect?",
79
+ "What collaborative and fairness work is happening? Stakeholder management, partnership development, incentive alignment, community building \u2014 who is building coalitions and making sure value flows equitably?",
80
+ "Which overlap states surface \u2014 Inspiration (vision + narrative), Trust (narrative + fairness), Hope (fairness + long-term thinking)?",
81
+ "Is the integration complete (Collective Vanguard Leader manifests across all three dimensions) or is one dimension absent / weak?",
82
+ "If one dimension is weak, what specific skill inside it is the lowest-friction activation point?"
83
+ ],
84
+ scoring_rubric: `For any Auki activity, identify which specific skills are strongly present, which are weak, which are absent. Cite specific evidence for each. Name the overlap emergent states that surface using their plain-English names (Inspiration, Trust, Hope). Do not surface the bucket names (Future Foresight, Narrative Dynamics, Shared Prosperity) in the output \u2014 those are internal reasoning scaffolds, not reader-facing labels. Translate bucket-level findings into skill-level observations: not "Future Foresight is present" but "the architectural thinking is strong \u2014 the systems design is clear"; not "Shared Prosperity is weak" but "partnership development is missing" or "incentive alignment hasn't been established." Center identity (Collective Vanguard Leader) may be named sparingly, only when all three dimensions fully integrate.`,
85
+ /**
86
+ * The skills inside each domain. These are the OUTPUT-FACING vocabulary —
87
+ * the observable behaviors and capabilities readers understand. When the
88
+ * AI renders findings, it uses these skill names, not the bucket names.
89
+ *
90
+ * From Kirsten\'s original vanguard diagram (see exemplars/vanguard-diagram).
91
+ */
92
+ domain_skills: {
93
+ "future-foresight": [
94
+ "strategic thinking",
95
+ "systems design",
96
+ "scenario planning",
97
+ "futurism and trend analysis",
98
+ "critical thinking",
99
+ "innovative problem-solving",
100
+ "data-driven decision-making",
101
+ "ethical judgment and governance",
102
+ "risk assessment and mitigation",
103
+ "curiosity and open-mindedness"
104
+ ],
105
+ "narrative-dynamics": [
106
+ "storytelling and narrative crafting",
107
+ "behavioral psychology and memetics",
108
+ "emotional intelligence",
109
+ "communication and presentation skills",
110
+ "cultural sensitivity and adaptation",
111
+ "social media and viral messaging strategy",
112
+ "brand building and positioning",
113
+ "persuasive writing",
114
+ "visualization and design thinking",
115
+ "audience analysis and engagement"
116
+ ],
117
+ "shared-prosperity": [
118
+ "stakeholder management",
119
+ "collaborative leadership",
120
+ "conflict resolution and mediation",
121
+ "economic and tokenomic design",
122
+ "incentive alignment",
123
+ "community building and management",
124
+ "inclusivity and equity advocacy",
125
+ "partnership development",
126
+ "transparency and accountability",
127
+ "negotiation and diplomacy"
128
+ ]
129
+ },
130
+ /**
131
+ * The translation rule: bucket names stay internal; skills + overlap
132
+ * state names surface in output. This is enforced by both the
133
+ * output-directive (guidance to the AI) and the forbidden-phrases list
134
+ * (renderer-level rejection of any output leaking bucket names).
135
+ */
136
+ output_translation: {
137
+ never_surface_in_output: [
138
+ "Future Foresight",
139
+ "Narrative Dynamics",
140
+ "Shared Prosperity"
141
+ ],
142
+ surface_freely: [
143
+ "Inspiration",
144
+ "Trust",
145
+ "Hope"
146
+ // plus any specific skill name from domain_skills above
147
+ ],
148
+ surface_sparingly: ["Collective Vanguard Leader"],
149
+ translation_examples: [
150
+ {
151
+ internal_reasoning: "Future Foresight is strong",
152
+ external_expression: "the architectural thinking is strong; the systems design is clear"
153
+ },
154
+ {
155
+ internal_reasoning: "Shared Prosperity is weak",
156
+ external_expression: "partnership development is missing; no one has established incentive alignment across teams"
157
+ },
158
+ {
159
+ internal_reasoning: "Narrative Dynamics is absent",
160
+ external_expression: "no one is telling the story of how these pieces connect; the audience does not see the shared vision yet"
161
+ }
162
+ ]
163
+ }
164
+ };
165
+ AUKI_VOCABULARY = {
166
+ proper_nouns: [
167
+ "$AUKI",
168
+ "Posemesh",
169
+ "Auki Labs",
170
+ "Posemesh Foundation",
171
+ "Intercognitive Foundation",
172
+ "Intercognitive",
173
+ "Sixth Protocol",
174
+ "Fifth Protocol",
175
+ "DePIN",
176
+ "Cactus",
177
+ "Terri",
178
+ "Mech Jagger",
179
+ "peaq",
180
+ "Mawari",
181
+ "GEODNET",
182
+ "Nine Pillars of AI Accessibility",
183
+ "the real world web",
184
+ "the posemesh"
185
+ ],
186
+ // Generic term → Auki-native replacement
187
+ preferred: {
188
+ device: "participant",
189
+ client: "participant",
190
+ "coordinate system": "domain",
191
+ "QR code for calibration": "portal",
192
+ "work request": "task",
193
+ "location alignment": "calibrate",
194
+ "sensor reading": "observation",
195
+ "physical environment": "environment",
196
+ "the network (public-facing)": "the real world web",
197
+ "the network (technical)": "the posemesh",
198
+ "coordination between devices": "spatial orchestration",
199
+ "buying services": "burning tokens for credits",
200
+ "full autonomy": "the full stack",
201
+ "non-GPS environments": "GPS-denied environments",
202
+ "our partners": "the Intercognitive coalition (Auki, peaq, Mawari, GEODNET)"
203
+ },
204
+ architecture: [
205
+ "domain",
206
+ "domain cluster",
207
+ "domain manager",
208
+ "domain owner",
209
+ "semantic layer",
210
+ "topography layer",
211
+ "rendering layer",
212
+ "partitions",
213
+ "observations",
214
+ "portals",
215
+ "participant",
216
+ "supply participant",
217
+ "demand participant",
218
+ "capabilities",
219
+ "tasks",
220
+ "discovery service",
221
+ "DHT",
222
+ "substrate",
223
+ "spatial orchestration",
224
+ "app-free navigation",
225
+ "marker-free VPS",
226
+ "spatially aware",
227
+ "the stack",
228
+ "the robotics stack",
229
+ "GPS-denied",
230
+ "locomotion",
231
+ "manipulation",
232
+ "spatio-semantic perception",
233
+ "mapping",
234
+ "positioning",
235
+ "hybrid robotics",
236
+ "AI copilot",
237
+ "shared spatial layer"
238
+ ],
239
+ economic: [
240
+ "burn",
241
+ "credit",
242
+ "deflationary mint",
243
+ "reputation",
244
+ "vacancy",
245
+ "treasury",
246
+ "utilization rate",
247
+ "initial supply",
248
+ "total supply",
249
+ "organization",
250
+ "trustless",
251
+ "peer-to-peer transactions",
252
+ "machine passports",
253
+ "machine economy"
254
+ ],
255
+ framing: [
256
+ "machine perception",
257
+ "spatial computing",
258
+ "collaborative perception",
259
+ "cognitive liberty",
260
+ "perception-first",
261
+ "protocol-not-product",
262
+ "sovereignty",
263
+ "decentralization",
264
+ "territory capture",
265
+ "foundations-before-execution",
266
+ "make the world machine-readable",
267
+ "connective tissue between digital and physical",
268
+ "open, permissionless, interoperable, private",
269
+ "skip the bottleneck, ship the leverage",
270
+ "coalition before standard",
271
+ "hybrid over pure",
272
+ "augmentation without surveillance",
273
+ "civilization-scale infrastructure",
274
+ "public good, not proprietary asset",
275
+ "Inspiration",
276
+ "Trust",
277
+ "Hope",
278
+ "Collective Vanguard Leader"
279
+ ],
280
+ // System-internal concepts → plain English for output.
281
+ // Readers don't know Radiant's vocabulary. Speaking it to them is jargon.
282
+ jargon_translations: {
283
+ "worldmodel": "your strategy file",
284
+ "canonical pattern": "something Radiant tracks by name over time",
285
+ "candidate pattern": "something Radiant noticed but hasn't been told to watch for",
286
+ "evidence gate": "how much activity Radiant needs before it speaks",
287
+ "invariant": "a rule you declared non-negotiable",
288
+ "signal extraction": "reading the activity",
289
+ "alignment score": "how aligned the work is with what you said matters",
290
+ "actor domain": "who did the work (a person, an AI, or both together)",
291
+ "presence-based averaging": "only counts what actually happened",
292
+ "drift detection": "noticing when things are shifting from what you said you wanted",
293
+ "lens rewrite": "framing adjustment before output",
294
+ "INSUFFICIENT_EVIDENCE": "not enough to say confidently",
295
+ "UNAVAILABLE": "we can't measure this yet"
296
+ }
297
+ };
298
+ AUKI_VOICE = {
299
+ register: 'diagnosis mode \u2014 compressed, strategic, builder-direct. Closer to the closing paragraph of an Auki year-recap ("2025 was foundations. 2026 is execution.") than to its month-by-month celebration.',
300
+ active_voice: "required",
301
+ specificity: "required",
302
+ hype_vocabulary: "forbidden",
303
+ hedging: "forbidden",
304
+ playfulness: "rare",
305
+ close_with_strategic_frame: "preferred",
306
+ punchline_move: "sparing",
307
+ honesty_about_failure: "required",
308
+ output_translation: `Reason internally through the three-domain frame (Future Foresight, Narrative Dynamics, Shared Prosperity) \u2014 that is the analytical scaffold. Express findings externally in the skills vocabulary INSIDE each domain (e.g. "strategic thinking," "partnership development," "storytelling," "incentive alignment"). Use the overlap state names (Inspiration, Trust, Hope) as plain-English emergent feelings. Do NOT surface the bucket names themselves (Future Foresight, Narrative Dynamics, Shared Prosperity) as labels in output \u2014 they are the model-maker's scaffold, not reader vocabulary. Readers understand skills, not buckets. The bucket names are in the forbidden_phrases list; the renderer will fail output that leaks them. Collective Vanguard Leader may be named sparingly when all three dimensions are fully integrated.`
309
+ };
310
+ AUKI_FORBIDDEN_PHRASES = Object.freeze([
311
+ // Domain bucket names — never surface to readers; translate to skills
312
+ "future foresight",
313
+ "narrative dynamics",
314
+ "shared prosperity",
315
+ // AI-assistant hedging
316
+ "it may be beneficial to consider",
317
+ "there appears to be",
318
+ "one possible interpretation",
319
+ "it might be worth exploring",
320
+ "it might be worth considering",
321
+ "consider whether",
322
+ "it is worth noting",
323
+ "please note that",
324
+ "it should be noted",
325
+ "in conclusion",
326
+ // Corporate / marketing
327
+ "unparalleled",
328
+ "best-in-class",
329
+ "industry-leading",
330
+ "revolutionary",
331
+ "cutting-edge",
332
+ "state-of-the-art",
333
+ "thrilled to announce",
334
+ "excited to share",
335
+ "game-changing",
336
+ "synergy",
337
+ "synergies",
338
+ "stakeholders",
339
+ // too corporate; prefer named actors
340
+ "end-users",
341
+ "value proposition",
342
+ "paradigm shift",
343
+ // Generic motion
344
+ "going forward",
345
+ "moving forward",
346
+ "at the end of the day",
347
+ "touching base",
348
+ "circle back",
349
+ "deep dive",
350
+ "level set",
351
+ "low-hanging fruit"
352
+ ]);
353
+ AUKI_PREFERRED_PATTERNS = Object.freeze([
354
+ // Direct declarative observation
355
+ "[Specific skill] is strong here. [Named evidence].",
356
+ "[Specific skill] is breaking here. [Named evidence].",
357
+ "[Specific skill] is missing. [Named consequence].",
358
+ // Skills-level diagnosis (replaces the bucket-speak pattern)
359
+ "The [specific skill] is clear \u2014 [specific evidence]. But [another specific skill] is missing \u2014 [specific effect]. [Imperative move].",
360
+ "What is missing is [specific skill], not effort.",
361
+ "[Trust | Inspiration | Hope] won't emerge until [skill-A] and [skill-B] happen together.",
362
+ // Imperative move
363
+ "Force [action] or [consequence].",
364
+ "Tighten this or it fragments.",
365
+ "Skip the bottleneck, ship the leverage.",
366
+ "Coalition before standard.",
367
+ // Strategic close — list-becomes-argument (from year-recap)
368
+ "Combine [A, B, C] and suddenly [strategic implication].",
369
+ "[Phase A] was [what you built]. [Phase B] is [what you execute].",
370
+ // Binary stakes (from Intercognitive)
371
+ "[Centralize X in the hands of a few] or [build a decentralized alternative].",
372
+ // Short thesis compression (from glossary)
373
+ "[Subject] is [essential-function] \u2014 [one-line precision].",
374
+ // Named specificity
375
+ "[Named partner/place/number] is the one that matters here.",
376
+ // Honest texture (from year-recap)
377
+ "[Specific thing] is not yet [state] \u2014 [honest qualifier].",
378
+ // Pivot to reality before solution (from hybrid robotics)
379
+ "The truth is [current reality]. [Better approach] is [the move].",
380
+ // Overlap-state compression (surfacing the emergent feel, not the buckets)
381
+ "Trust is not emerging because [specific narrative skill] and [specific coalition skill] are not happening together.",
382
+ "Inspiration is landing here \u2014 [specific evidence of vision + rally].",
383
+ "Hope is present \u2014 [specific evidence of long-term thinking meeting fair distribution]."
384
+ ]);
385
+ AUKI_STRATEGIC_PATTERNS = Object.freeze([
386
+ "Skip the bottleneck, ship the leverage \u2014 identify the hard layers in the stack, build on the layers that deliver value now.",
387
+ "Coalition before standard \u2014 form the group that will set the rules before the rules need to exist.",
388
+ "Foundations before execution \u2014 build the infrastructure that makes the thing possible; then scale.",
389
+ "Hybrid over pure \u2014 augment humans with AI where full autonomy is not ready; transition later.",
390
+ "Decentralized > proprietary \u2014 when choosing architecture, prefer open / community-governed / interoperable over closed / owned / siloed.",
391
+ "Layered analysis first, strategic move second \u2014 decompose before deciding.",
392
+ 'Named specificity over abstractions \u2014 cite people, places, partners, numbers; never "stakeholders" or "the industry."',
393
+ "Community deployment before public release \u2014 validate with a small group of operators before opening the door.",
394
+ "Cognitive liberty as inviolable constraint \u2014 block any move that violates sovereignty over spatial/sensor data, regardless of other benefits.",
395
+ "Compress mission to one sentence \u2014 one memorable thesis carries more weight than a manifesto."
396
+ ]);
397
+ AUKI_EXEMPLARS = Object.freeze([
398
+ {
399
+ path: "intercognitive-foundation.md",
400
+ title: "The Intercognitive Foundation",
401
+ exhibits: ["future-foresight", "narrative-dynamics", "shared-prosperity"],
402
+ integration_quality: "full \u2014 all three domains integrated; Collective Vanguard Leader manifests through the coalition itself",
403
+ notes: 'The perfect vanguard exemplar. Future Foresight: inflection-point framing, Nine Pillars architecture. Narrative Dynamics: "the physical world cannot remain a blind spot," rally language, invitation to join. Shared Prosperity: coalition of four founding members, "no single entity should own," community governance, public good framing. When Radiant outputs something that feels vanguard-complete, it should resemble this in structure and tone.'
404
+ },
405
+ {
406
+ path: "hybrid-robotics-essay.md",
407
+ title: "The Case for Hybrid Robotics",
408
+ exhibits: ["future-foresight", "shared-prosperity"],
409
+ integration_quality: "partial \u2014 Future Foresight dominant, Shared Prosperity secondary, Narrative Dynamics present but informing rather than rallying. Overlap: Hope emerges (long-horizon infrastructure for collective benefit).",
410
+ notes: 'Auki teaching how it thinks. The stack-analysis \u2192 bottleneck-identification \u2192 skip-and-ship pattern is a reusable Auki reasoning move. When the AI applies "systems-first" and "leverage-oriented" thinking, it should resemble this essay \u2014 structured, honest about current reality, pivoting to a better approach via layered reasoning.'
411
+ },
412
+ {
413
+ path: "glossary.md",
414
+ title: "Auki Glossary",
415
+ exhibits: ["future-foresight"],
416
+ integration_quality: 'primary-dominant \u2014 Future Foresight dominant (precise technical definitions as long-range conceptual infrastructure). Shared Prosperity implicit (glossary is open, cross-referenced, serves the ecosystem). Narrative Dynamics flashes once ("a mesh of machines reasoning about pose") but is not primary.',
417
+ notes: 'Source of the vocabulary map. Also teaches compression style: one-line precision definitions, cross-reference density, occasional poetic compression. When the renderer produces short thesis sentences, aim for the "mesh of machines reasoning about pose" level of compression.'
418
+ },
419
+ {
420
+ path: "year-recap-2025.md",
421
+ title: "Auki 2025 Year-End Recap",
422
+ exhibits: ["narrative-dynamics", "shared-prosperity"],
423
+ integration_quality: "partial \u2014 Narrative Dynamics dominant, Shared Prosperity strong, Future Foresight arrives only in the closing paragraph. Overlap: Trust emerges (stakeholders can see their place in the collective progress).",
424
+ notes: 'The celebration register \u2014 warm, specific, named. Not the diagnosis register the lens primarily enforces, but the same DNA. Use this exemplar when calibrating how Auki names specifics (Pepito in Bali, Mika Haak at HQ, the HK web3 robotics cabal) and how the "\u2014 literally" punchline move lands. Do NOT mimic the celebration warmth in diagnosis outputs.'
425
+ }
426
+ ]);
427
+ aukiBuilderLens = {
428
+ name: "auki-builder",
429
+ description: "Renders behavioral interpretation through the vanguard leadership model \u2014 Future Foresight, Narrative Dynamics, Shared Prosperity. Role-based, not personal. Encodes how Auki-grade builders think and speak when the vanguard model is running. Companion to auki-vanguard.worldmodel.md (the abstract DNA) and the exemplars at src/radiant/examples/auki/exemplars/ (worked implementations).",
430
+ primary_frame: {
431
+ domains: AUKI_VANGUARD_FRAME.domains,
432
+ overlaps: AUKI_VANGUARD_FRAME.overlaps,
433
+ center_identity: AUKI_VANGUARD_FRAME.center_identity,
434
+ evaluation_questions: AUKI_VANGUARD_FRAME.evaluation_questions,
435
+ scoring_rubric: AUKI_VANGUARD_FRAME.scoring_rubric
436
+ },
437
+ vocabulary: AUKI_VOCABULARY,
438
+ voice: AUKI_VOICE,
439
+ forbidden_phrases: AUKI_FORBIDDEN_PHRASES,
440
+ preferred_patterns: AUKI_PREFERRED_PATTERNS,
441
+ strategic_patterns: AUKI_STRATEGIC_PATTERNS,
442
+ exemplar_refs: AUKI_EXEMPLARS,
443
+ rewrite: aukiBuilderRewrite
444
+ };
445
+ }
446
+ });
447
+
448
+ // src/radiant/lenses/index.ts
449
+ var lenses_exports = {};
450
+ __export(lenses_exports, {
451
+ LENSES: () => LENSES,
452
+ aukiBuilderLens: () => aukiBuilderLens,
453
+ getLens: () => getLens,
454
+ listLenses: () => listLenses
455
+ });
456
+ function getLens(id) {
457
+ return LENSES[id];
458
+ }
459
+ function listLenses() {
460
+ return Object.freeze(Object.keys(LENSES));
461
+ }
462
+ var LENSES;
463
+ var init_lenses = __esm({
464
+ "src/radiant/lenses/index.ts"() {
465
+ "use strict";
466
+ init_auki_builder();
467
+ init_auki_builder();
468
+ LENSES = Object.freeze({
469
+ "auki-builder": aukiBuilderLens
470
+ });
471
+ }
472
+ });
473
+
474
+ // src/cli/radiant.ts
475
+ var radiant_exports = {};
476
+ __export(radiant_exports, {
477
+ main: () => main
478
+ });
479
+ module.exports = __toCommonJS(radiant_exports);
480
+ var import_fs2 = require("fs");
481
+ var import_path2 = require("path");
482
+
483
+ // src/radiant/commands/think.ts
484
+ init_lenses();
485
+
486
+ // src/radiant/core/prompt.ts
487
+ function composeSystemPrompt(worldmodelContent, lens) {
488
+ const sections = [];
489
+ sections.push(
490
+ `## Worldmodel
491
+
492
+ You are operating inside a governed environment. The worldmodel below
493
+ defines the invariants, signals, decision priorities, and behavioral
494
+ expectations for this organization. Every response you produce must
495
+ be grounded in this worldmodel.
496
+
497
+ ` + worldmodelContent
498
+ );
499
+ const frame = lens.primary_frame;
500
+ const questionsBlock = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
501
+ const overlapsBlock = frame.overlaps.map(
502
+ (o) => `- ${o.domains[0]} + ${o.domains[1]} = **${o.emergent_state}**: ${o.description}`
503
+ ).join("\n");
504
+ sections.push(
505
+ `## How to Think (Analytical Frame: ${lens.name})
506
+
507
+ ${frame.scoring_rubric}
508
+
509
+ ### Evaluation questions to reason through
510
+
511
+ ${questionsBlock}
512
+
513
+ ### Overlap emergent states
514
+
515
+ ${overlapsBlock}
516
+
517
+ ### Center identity
518
+
519
+ When all dimensions integrate fully: **${frame.center_identity}**. Surface this sparingly \u2014 only when the integration is genuinely complete.`
520
+ );
521
+ const vocabPreferred = Object.entries(lens.vocabulary.preferred).map(([generic, native]) => `- "${generic}" \u2192 **${native}**`).join("\n");
522
+ const vocabArchitecture = lens.vocabulary.architecture.map((t) => `\`${t}\``).join(", ");
523
+ const vocabProperNouns = lens.vocabulary.proper_nouns.map((n) => `**${n}**`).join(", ");
524
+ const strategicBlock = lens.strategic_patterns.map((p) => `- ${p}`).join("\n");
525
+ sections.push(
526
+ `## How to Speak (Voice: ${lens.name})
527
+
528
+ Register: ${lens.voice.register}
529
+
530
+ Rules:
531
+ - Active voice: ${lens.voice.active_voice}
532
+ - Named specificity (people, places, numbers): ${lens.voice.specificity}
533
+ - Hype vocabulary: ${lens.voice.hype_vocabulary}
534
+ - Hedging / qualified phrasing: ${lens.voice.hedging}
535
+ - Playfulness: ${lens.voice.playfulness}
536
+ - Close with strategic frame: ${lens.voice.close_with_strategic_frame}
537
+ - Honesty about failure: ${lens.voice.honesty_about_failure}
538
+
539
+ ### Output translation discipline
540
+
541
+ ${lens.voice.output_translation}
542
+
543
+ ### Vocabulary
544
+
545
+ Proper nouns (use literally): ${vocabProperNouns}
546
+
547
+ Preferred term substitutions:
548
+ ${vocabPreferred}
549
+
550
+ Architecture vocabulary: ${vocabArchitecture}
551
+
552
+ ### Strategic decision patterns
553
+
554
+ When recommending action, these patterns reflect how this organization resolves tradeoffs:
555
+
556
+ ${strategicBlock}`
557
+ );
558
+ const forbiddenBlock = lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
559
+ sections.push(
560
+ `## Guardrails
561
+
562
+ Do NOT use any of these phrases in your response. If you catch yourself
563
+ reaching for one, rephrase in direct, active, specific language instead.
564
+
565
+ ${forbiddenBlock}
566
+
567
+ If your response would violate a worldmodel invariant, state the conflict
568
+ explicitly and propose an alternative that honors the invariant.`
569
+ );
570
+ return sections.join("\n\n---\n\n");
571
+ }
572
+
573
+ // src/radiant/core/voice-check.ts
574
+ function checkForbiddenPhrases(lens, text) {
575
+ const lower = text.toLowerCase();
576
+ const violations = [];
577
+ for (const phrase of lens.forbidden_phrases) {
578
+ const phraseLower = phrase.toLowerCase();
579
+ let pos = 0;
580
+ while (true) {
581
+ const idx = lower.indexOf(phraseLower, pos);
582
+ if (idx === -1) break;
583
+ violations.push({ phrase, offset: idx });
584
+ pos = idx + phraseLower.length;
585
+ }
586
+ }
587
+ violations.sort((a, b) => a.offset - b.offset);
588
+ return violations;
589
+ }
590
+
591
+ // src/radiant/commands/think.ts
592
+ async function think(input) {
593
+ const lens = resolveLens(input.lensId);
594
+ const systemPrompt = composeSystemPrompt(input.worldmodelContent, lens);
595
+ const response = await input.ai.complete(systemPrompt, input.query);
596
+ const voiceViolations = checkForbiddenPhrases(lens, response);
597
+ return {
598
+ response,
599
+ lens: lens.name,
600
+ voiceViolations,
601
+ voiceClean: voiceViolations.length === 0,
602
+ systemPrompt
603
+ };
604
+ }
605
+ function resolveLens(id) {
606
+ const lens = getLens(id);
607
+ if (!lens) {
608
+ const available = Object.keys(
609
+ // Inline import-free way to list. At runtime, getLens returns from
610
+ // the same LENSES record — we just need the keys for the error message.
611
+ // We re-import getLens from lenses/index which exposes listLenses, but
612
+ // since we already have lens===undefined we know the id was wrong.
613
+ {}
614
+ );
615
+ throw new Error(
616
+ `Lens "${id}" not found. Check the id or register the lens in src/radiant/lenses/index.ts.`
617
+ );
618
+ }
619
+ return lens;
620
+ }
621
+
622
+ // src/radiant/commands/emergent.ts
623
+ init_lenses();
624
+
625
+ // src/radiant/core/scopes.ts
626
+ function parseRepoScope(scope) {
627
+ const cleaned = scope.replace(/^https?:\/\//, "").replace(/^github\.com\//, "").replace(/\.git$/, "").replace(/\/$/, "");
628
+ const parts = cleaned.split("/");
629
+ if (parts.length < 2 || !parts[0] || !parts[1]) {
630
+ throw new Error(
631
+ `Cannot parse repo scope: "${scope}". Expected "owner/repo" or a GitHub URL.`
632
+ );
633
+ }
634
+ return { owner: parts[0], repo: parts[1] };
635
+ }
636
+ function formatScope(scope) {
637
+ return `${scope.owner}/${scope.repo}`;
638
+ }
639
+
640
+ // src/radiant/adapters/github.ts
641
+ async function fetchGitHubActivity(scope, token, options = {}) {
642
+ const windowDays = options.windowDays ?? 14;
643
+ const perPage = options.perPage ?? 100;
644
+ const since = new Date(
645
+ Date.now() - windowDays * 24 * 60 * 60 * 1e3
646
+ ).toISOString();
647
+ const base = `https://api.github.com/repos/${formatScope(scope)}`;
648
+ const headers = {
649
+ Authorization: `token ${token}`,
650
+ Accept: "application/vnd.github.v3+json",
651
+ "User-Agent": "neuroverseos-radiant"
652
+ };
653
+ const events = [];
654
+ const [commits, prs, comments] = await Promise.all([
655
+ fetchJSON(
656
+ `${base}/commits?since=${since}&per_page=${perPage}`,
657
+ headers
658
+ ),
659
+ fetchJSON(
660
+ `${base}/pulls?state=all&sort=updated&direction=desc&per_page=${perPage}`,
661
+ headers
662
+ ),
663
+ fetchJSON(
664
+ `${base}/issues/comments?since=${since}&per_page=${perPage}&sort=updated&direction=desc`,
665
+ headers
666
+ )
667
+ ]);
668
+ for (const c of commits) {
669
+ events.push(mapCommit(c, scope));
670
+ }
671
+ const sinceDate = new Date(since);
672
+ for (const pr of prs) {
673
+ if (new Date(pr.updated_at) >= sinceDate) {
674
+ events.push(mapPR(pr, scope));
675
+ }
676
+ }
677
+ for (const comment of comments) {
678
+ events.push(mapComment(comment, scope));
679
+ }
680
+ events.sort(
681
+ (a, b) => Date.parse(a.timestamp) - Date.parse(b.timestamp)
682
+ );
683
+ return events;
684
+ }
685
+ function mapCommit(c, scope) {
686
+ const actor = mapUser(c.author, c.commit.author.name);
687
+ const coActors = extractCoAuthors(c.commit.message);
688
+ return {
689
+ id: `commit-${c.sha.slice(0, 8)}`,
690
+ timestamp: c.commit.author.date,
691
+ actor,
692
+ coActors: coActors.length > 0 ? coActors : void 0,
693
+ kind: "commit",
694
+ content: c.commit.message,
695
+ metadata: {
696
+ scope: formatScope(scope),
697
+ sha: c.sha
698
+ }
699
+ };
700
+ }
701
// Convert a GitHub pull-request API object into a normalized activity event.
// Base kind precedence: merged > open > closed. When someone other than the
// author merged the PR, the event is re-attributed to the merger and linked
// back to the author via respondsTo (a cross-actor link the domain
// classifier treats as a human/machine boundary crossing).
function mapPR(pr, scope) {
  const event = {
    id: `pr-${pr.number}`,
    timestamp: pr.created_at,
    actor: mapUser(pr.user),
    kind: pr.merged_at ? "pr_merged" : pr.state === "open" ? "pr_opened" : "pr_closed",
    content: `${pr.title}

${pr.body ?? ""}`.trim(),
    metadata: {
      scope: formatScope(scope),
      pr_number: pr.number,
      state: pr.state,
      merged_at: pr.merged_at
    }
  };
  if (pr.merged_by && pr.merged_by.login !== pr.user.login) {
    event.actor = mapUser(pr.merged_by);
    event.kind = "pr_merged";
    event.timestamp = pr.merged_at ?? pr.updated_at;
    // NOTE(review): this eventId carries an "-opened" suffix, but no event
    // with that id is ever generated (mapPR emits `pr-<n>`), so the reference
    // dangles. Confirm whether that is intentional (avoids self-reference)
    // or should point at `pr-<n>`.
    event.respondsTo = {
      eventId: `pr-${pr.number}-opened`,
      actor: mapUser(pr.user)
    };
  }
  return event;
}
728
// Convert an issue/PR comment into a normalized activity event.
// GitHub serves PR comments through the issues API, so the parent number is
// recovered from issue_url and assumed to name a PR event (`pr-<n>`).
function mapComment(comment, scope) {
  const issueMatch = comment.issue_url.match(/\/issues\/(\d+)$/);
  const issueNumber = issueMatch ? issueMatch[1] : "unknown";
  const event = {
    id: `comment-${comment.id}`,
    timestamp: comment.created_at,
    actor: mapUser(comment.user),
    kind: "comment",
    content: comment.body,
    // The parent author is not present in the comment payload, so the
    // responded-to actor is recorded as unknown.
    respondsTo: {
      eventId: `pr-${issueNumber}`,
      actor: { id: "unknown", kind: "unknown" }
    },
    metadata: {
      scope: formatScope(scope),
      issue_number: issueNumber
    }
  };
  return event;
}
748
// GitHub logins that are always machine actors; compared against the
// lowercased login in mapUser.
var KNOWN_AI_LOGINS = /* @__PURE__ */ new Set([
  "github-actions[bot]",
  "dependabot[bot]",
  "renovate[bot]",
  "copilot"
]);
// Lowercased Co-authored-by trailer names that indicate an AI assistant
// co-authored a commit (see extractCoAuthors).
var KNOWN_AI_CO_AUTHOR_NAMES = /* @__PURE__ */ new Set([
  "claude",
  "copilot",
  "cursor",
  "codeium",
  "tabnine",
  "codex"
]);
762
// Normalize a GitHub user object into an actor record.
// No user object at all (e.g. a commit whose email matches no account)
// yields kind "unknown" with the optional fallback name as the id.
// A user is a bot when GitHub says so (`type: "Bot"`), when the login ends
// with "[bot]", or when the lowercased login is a known machine account.
function mapUser(ghUser, fallbackName) {
  if (!ghUser) {
    return {
      id: fallbackName ?? "unknown",
      kind: "unknown",
      name: fallbackName
    };
  }
  const { login } = ghUser;
  const isBot =
    ghUser.type === "Bot" ||
    login.endsWith("[bot]") ||
    KNOWN_AI_LOGINS.has(login.toLowerCase());
  return {
    id: login,
    kind: isBot ? "bot" : "human",
    name: login
  };
}
783
// Parse Co-authored-by trailers out of a commit message.
// Returns one co-actor per trailer; kind is "ai" when the trailer name
// matches or contains a known assistant name, otherwise "human".
function extractCoAuthors(message) {
  const coAuthors = [];
  const lines = message.split("\n");
  for (const line of lines) {
    // Trailer shape: "Co-authored-by: Name <email>" (key is case-insensitive).
    const match = line.match(
      /^Co-authored-by:\s*(.+?)\s*<([^>]*)>/i
    );
    if (match) {
      const name = match[1].trim().toLowerCase();
      // NOTE(review): the substring fallback is broad — a human whose name
      // happens to contain e.g. "cursor" would be flagged as AI; confirm
      // that trade-off is acceptable.
      const isAI = KNOWN_AI_CO_AUTHOR_NAMES.has(name) || [...KNOWN_AI_CO_AUTHOR_NAMES].some((ai) => name.includes(ai));
      coAuthors.push({
        // Prefer the email as a stable id; the regex allows an empty <>,
        // in which case fall back to the lowercased name.
        id: match[2] || name,
        kind: isAI ? "ai" : "human",
        name: match[1].trim()
      });
    }
  }
  return coAuthors;
}
802
// Fetch a GitHub REST endpoint and parse the JSON body.
// - 404 is treated as "no data" and yields an empty array (missing repo or
//   empty resource) so callers can iterate unconditionally.
// - 403 whose body mentions the rate limit gets a dedicated, actionable error.
// - Any other non-OK status throws with a truncated body excerpt.
// Fix: the error body is now read exactly once. The previous version called
// res.text() a second time for a non-rate-limit 403, which throws
// "Body is unusable" (a fetch Response body can only be consumed once) and
// masked the real API error.
async function fetchJSON(url, headers) {
  const res = await fetch(url, { headers });
  if (res.ok) {
    return await res.json();
  }
  if (res.status === 404) return [];
  const body = await res.text();
  if (res.status === 403 && body.includes("rate limit")) {
    throw new Error(
      `GitHub API rate limit exceeded. Wait or use a token with higher limits.`
    );
  }
  throw new Error(
    `GitHub API error ${res.status} for ${url}: ${body.slice(0, 300)}`
  );
}
820
+
821
+ // src/radiant/adapters/exocortex.ts
822
+ var import_fs = require("fs");
823
+ var import_path = require("path");
824
// Load optional exocortex context files from a directory.
// Each field is the trimmed file content or null when the file is missing,
// empty, or unreadable; filesLoaded counts files that actually contributed.
function readExocortex(dirPath) {
  const dir = (0, import_path.resolve)(dirPath);
  let filesLoaded = 0;
  // Return the first candidate path that exists and has non-empty content.
  // Read errors are deliberately swallowed — every file here is optional.
  function tryRead(...paths) {
    for (const p of paths) {
      const full = (0, import_path.join)(dir, p);
      if ((0, import_fs.existsSync)(full)) {
        try {
          const content = (0, import_fs.readFileSync)(full, "utf-8").trim();
          if (content) {
            filesLoaded++;
            return content;
          }
        } catch {
        }
      }
    }
    return null;
  }
  const ctx = {
    attention: tryRead("attention.md"),
    goals: tryRead("goals.md"),
    identity: tryRead("identity.md"),
    // Some layouts keep sources under src/ — try both spellings.
    sprint: tryRead("sprint.md", "src/sprint.md"),
    organization: tryRead("org/organization.md", "org/src/organization.md"),
    methods: tryRead("org/methods.md", "org/src/methods.md"),
    source: dir,
    filesLoaded
  };
  return ctx;
}
855
// Render the loaded exocortex context as a markdown prompt section.
// Returns "" when nothing was loaded so callers can skip the section.
// Emits the intro paragraph followed by one "### <heading>" section per
// non-empty field, in the fixed order: attention, goals, sprint, identity,
// organization, methods; sections are separated by blank lines.
function formatExocortexForPrompt(ctx) {
  if (ctx.filesLoaded === 0) return "";
  const sections = [
    "## Stated Intent (from exocortex)\n\nThe following is what the person/team SAYS they are doing, focused on, and working toward. Compare this against the ACTUAL activity from GitHub. Where stated intent and observed behavior diverge, that gap is the most valuable signal in this read. Name it directly."
  ];
  const labeled = [
    ["Current attention", ctx.attention],
    ["Goals", ctx.goals],
    ["Sprint focus", ctx.sprint],
    ["Identity and values", ctx.identity],
    ["Organization", ctx.organization],
    ["Methods", ctx.methods]
  ];
  for (const [heading, body] of labeled) {
    if (body) {
      sections.push(`### ${heading}\n\n${body}`);
    }
  }
  return sections.join("\n\n");
}
893
// One-line summary of which exocortex files were loaded, e.g.
// "goals, sprint (2 files)" — or a fixed message when nothing was found.
function summarizeExocortex(ctx) {
  if (ctx.filesLoaded === 0) return "no exocortex files found";
  const labels = [
    ["attention", ctx.attention],
    ["goals", ctx.goals],
    ["sprint", ctx.sprint],
    ["identity", ctx.identity],
    ["org", ctx.organization],
    ["methods", ctx.methods]
  ]
    .filter(([, value]) => value)
    .map(([label]) => label);
  return `${labels.join(", ")} (${ctx.filesLoaded} files)`;
}
904
+
905
+ // src/radiant/core/domain.ts
906
// An actor kind is on the "life" side when it is (or might be) a person.
function isLifeSide(k) {
  return k === "human" || k === "unknown";
}
// An actor kind is on the "cyber" side when it is software.
function isCyberSide(k) {
  return k === "ai" || k === "bot";
}
// True when the two kinds sit on opposite sides of the human/machine boundary.
function crossesBoundary(a, b) {
  if (isLifeSide(a)) return isCyberSide(b);
  if (isCyberSide(a)) return isLifeSide(b);
  return false;
}
915
// Classify an event into "life" (human-side), "cyber" (machine-side), or
// "joint". An event is joint when its actor set mixes both sides, or when it
// responds to an event whose actor sits across the human/machine boundary.
function classifyActorDomain(event) {
  const kinds = [event.actor.kind];
  for (const co of event.coActors ?? []) {
    kinds.push(co.kind);
  }
  const hasLife = kinds.some(isLifeSide);
  const hasCyber = kinds.some(isCyberSide);
  if (hasLife && hasCyber) return "joint";
  const reply = event.respondsTo;
  if (reply && crossesBoundary(event.actor.kind, reply.actor.kind)) {
    return "joint";
  }
  return isCyberSide(event.actor.kind) ? "cyber" : "life";
}
929
+
930
+ // src/radiant/core/signals.ts
931
// Pair each raw event with its actor-domain classification.
function classifyEvents(events) {
  const classified = [];
  for (const event of events) {
    classified.push({ event, domain: classifyActorDomain(event) });
  }
  return classified;
}
937
// Run every extractor against every domain and flatten the results into one
// signal list (extractor-major, domain-minor order: life, cyber, joint).
function extractSignals(events, extractors = DEFAULT_SIGNAL_EXTRACTORS) {
  const results = [];
  for (const extractor of extractors) {
    for (const domain of ["life", "cyber", "joint"]) {
      const { score, eventCount, confidence } = extractor.extract(events, domain);
      results.push({ id: extractor.id, domain, score, eventCount, confidence });
    }
  }
  return results;
}
954
// Sentinel extractor result for "no events in this domain".
var ZERO = { score: 0, eventCount: 0, confidence: 0 };
// Subset of classified events belonging to one domain.
function inDomain(events, domain) {
  const matching = [];
  for (const entry of events) {
    if (entry.domain === domain) matching.push(entry);
  }
  return matching;
}
// Confidence ramps linearly from 0 to 1 as the sample grows to 10 events.
function confidenceFromCount(count) {
  const scaled = count / 10;
  return scaled > 1 ? 1 : scaled;
}
// Clamp a raw score into the 0–100 band.
function clamp100(n) {
  return n < 0 ? 0 : n > 100 ? 100 : n;
}
966
// Signal extractors. Each measures one behavioral dimension of the classified
// events for a single domain and returns { score: 0-100, eventCount,
// confidence }; ZERO is returned whenever the domain has no events.
var CLARITY_EXTRACTOR = {
  id: "clarity",
  description: "Informativeness of event content \u2014 commit messages, PR bodies, review text",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    // Average content length per event, saturating at 200 chars = full marks.
    const totalScore = sub.reduce((acc, e) => {
      const len = (e.event.content ?? "").length;
      const norm = Math.min(len, 200) / 200;
      return acc + norm * 100;
    }, 0);
    return {
      score: clamp100(totalScore / sub.length),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Fraction of the domain's events whose primary actor could be attributed.
var OWNERSHIP_EXTRACTOR = {
  id: "ownership",
  description: "Clarity of accountability \u2014 fraction of events with a known primary actor",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    const attributed = sub.filter((e) => e.event.actor.kind !== "unknown").length;
    return {
      score: clamp100(attributed / sub.length * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Fraction of this domain's events that some other event responded to.
// The respondsTo links are collected from the FULL event list, so the
// follow-up may come from any domain.
var FOLLOW_THROUGH_EXTRACTOR = {
  id: "follow_through",
  description: "Fraction of events that were followed up \u2014 i.e. referenced by a later event",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    const referencedIds = /* @__PURE__ */ new Set();
    for (const e of events) {
      const ref = e.event.respondsTo?.eventId;
      if (ref) referencedIds.add(ref);
    }
    const followedUp = sub.filter((e) => referencedIds.has(e.event.id)).length;
    return {
      score: clamp100(followedUp / sub.length * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Fraction of this domain's events that themselves reference a prior event.
var ALIGNMENT_EXTRACTOR = {
  id: "alignment",
  description: "Coordination pressure \u2014 fraction of events that reference a prior event",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    const referencing = sub.filter((e) => e.event.respondsTo !== void 0).length;
    return {
      score: clamp100(referencing / sub.length * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Events per day over the domain's active span, saturating at 10/day = 100.
var DECISION_MOMENTUM_EXTRACTOR = {
  id: "decision_momentum",
  description: "Rate of activity in this domain \u2014 events per day, capped at 10/day",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    // A single event has no span to compute a rate from.
    if (sub.length < 2) {
      return {
        score: 20,
        // token non-zero score — single event = some motion
        eventCount: sub.length,
        confidence: confidenceFromCount(sub.length)
      };
    }
    const ts = sub.map((e) => Date.parse(e.event.timestamp)).sort((a, b) => a - b);
    const spanMs = ts[ts.length - 1] - ts[0];
    // Floor the span at one hour so tight bursts don't divide by ~zero.
    const spanDays = Math.max(spanMs / (24 * 60 * 60 * 1e3), 1 / 24);
    const perDay = sub.length / spanDays;
    const normalized = Math.min(perDay, 10) / 10;
    return {
      score: clamp100(normalized * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Default battery applied by extractSignals; frozen so callers can't mutate it.
var DEFAULT_SIGNAL_EXTRACTORS = Object.freeze([
  CLARITY_EXTRACTOR,
  OWNERSHIP_EXTRACTOR,
  FOLLOW_THROUGH_EXTRACTOR,
  ALIGNMENT_EXTRACTOR,
  DECISION_MOMENTUM_EXTRACTOR
]);
1064
+
1065
+ // src/radiant/types.ts
1066
// Minimum evidence before a score is trusted: at least `k` events observed
// at confidence `c` or better.
var DEFAULT_EVIDENCE_GATE = { k: 3, c: 0.5 };
// Narrowing check: a score slot holds either a number or a sentinel string
// ("INSUFFICIENT_EVIDENCE" / "UNAVAILABLE").
function isScored(s) {
  const kind = typeof s;
  return kind === "number";
}
1070
+
1071
+ // src/radiant/core/math.ts
1072
// A dimension counts as evidence only when it clears the gate on both
// sample size (eventCount >= k) and confidence (>= c).
function isPresent(o, gate = DEFAULT_EVIDENCE_GATE) {
  if (o.eventCount < gate.k) return false;
  return o.confidence >= gate.c;
}
// Mean score over the gated items; the sentinel string is returned when
// nothing clears the gate so callers can distinguish "no data" from 0.
function presenceAverage(items, gate = DEFAULT_EVIDENCE_GATE) {
  const admitted = items.filter((item) => isPresent(item, gate));
  if (admitted.length === 0) return "INSUFFICIENT_EVIDENCE";
  let total = 0;
  for (const { score } of admitted) {
    total += score;
  }
  return total / admitted.length;
}
// Life-side alignment: average of the capability's gated dimensions.
function scoreLife(capability, gate = DEFAULT_EVIDENCE_GATE) {
  return presenceAverage(capability.dimensions, gate);
}
// Cyber-side alignment: same aggregation as the life side.
function scoreCyber(capability, gate = DEFAULT_EVIDENCE_GATE) {
  return presenceAverage(capability.dimensions, gate);
}
// Bridging (NeuroVerse) score; meaningless without a worldmodel, hence the
// dedicated "UNAVAILABLE" sentinel.
function scoreNeuroVerse(components, worldmodelLoaded, gate = DEFAULT_EVIDENCE_GATE) {
  if (!worldmodelLoaded) return "UNAVAILABLE";
  return presenceAverage(components, gate);
}
// Composite R: mean of whichever of the three scores are numeric.
function scoreComposite(a_L, a_C, a_N) {
  const numeric = [a_L, a_C, a_N].filter((s) => isScored(s));
  if (numeric.length === 0) return "INSUFFICIENT_EVIDENCE";
  let sum = 0;
  for (const s of numeric) {
    sum += s;
  }
  return sum / numeric.length;
}
1099
+
1100
+ // src/radiant/core/patterns.ts
1101
// Ask the AI to interpret the measured signals + events through the lens.
// Returns the structured patterns plus the free-text meaning/move sections,
// and echoes the raw model output for auditability.
async function interpretPatterns(input) {
  const prompt = buildInterpretationPrompt(input);
  const raw = await input.ai.complete(prompt, "Analyze the activity and produce the read.");
  const parsed = parseInterpretation(raw, input.canonicalPatterns ?? []);
  return {
    patterns: parsed.patterns,
    meaning: parsed.meaning,
    move: parsed.move,
    raw_ai_response: raw
  };
}
1112
// Build the single large system prompt for the interpretation call.
// Everything the model needs is inlined: the worldmodel, the measured signal
// matrix, an event sample, lens voice guidance with wrong/right examples, a
// jargon-translation table, the JSON output schema, and the hard rules
// (including the lens's forbidden phrases).
function buildInterpretationPrompt(input) {
  const signalSummary = formatSignalSummary(input.signals);
  // Cap the event sample at 30 to keep the prompt bounded.
  const eventSample = formatEventSample(input.events, 30);
  const canonicalList = (input.canonicalPatterns ?? []).length > 0 ? `Patterns the organization has already named (use these names if you see them):
${input.canonicalPatterns.map((p) => `- ${p}`).join("\n")}` : "No patterns have been named yet. Everything you observe is new.";
  const frame = input.lens.primary_frame;
  const evalQuestions = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
  const forbiddenList = input.lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
  const jargonTable = Object.entries(input.lens.vocabulary.jargon_translations).map(([internal, plain]) => `  "${internal}" \u2192 "${plain}"`).join("\n");
  return `You are a behavioral intelligence system reading team activity and producing a read for the reader who needs to act on it.

## Context the reader has loaded

${input.worldmodelContent}

## What happened this window

### Signal matrix (what Radiant measured)

${signalSummary}

### Recent events (sample)

${eventSample}

## How to reason

Reason through these questions INTERNALLY \u2014 do not list them in your output:

${evalQuestions}

Scoring rubric: ${frame.scoring_rubric}

${canonicalList}

${input.statedIntent ? input.statedIntent + "\n" : ""}## Voice: speak like an Auki builder, not like a status report

The reader wants to know **what this means and what to do**, not "what happened." Frame every observation as consequence + implication, not just description.

Wrong voice (status report):
"Rapid deployment of complex technical architecture through composable commits."
"Signal extraction across life, cyber, and joint domains enables consistent behavioral analysis."
"Decision momentum scores suggest architectural delivery without corresponding strategic direction setting."

Right voice (Auki builder):
"Shipping pace is high. The architecture is getting ahead of strategic decisions \u2014 velocity without a declared target."
"Every pattern is new. Nothing is being tracked by name yet. That's fine for now; it becomes a problem when patterns repeat and you still don't have vocabulary for them."
"The work is converging across three modules. The story of HOW they compose isn't being told yet."

The difference: consequence in plain English, not observation in system vocabulary.

## Translate internal jargon to plain English

Readers don't know Radiant's vocabulary. Before ANY description appears in your output, translate these:

${jargonTable}

For example: don't say "update the worldmodel." Say "add a line to your strategy file."

## Health is a valid read

If the activity is healthy and aligned with the worldmodel, SAY SO. Don't fabricate problems. Over-prescription is a voice failure. Legitimate outputs include:

"Nothing's broken. Keep shipping."
"This is what healthy looks like \u2014 the invariants are holding."
"Nothing here needs action."

Only recommend a move when the evidence actually calls for one.

## Output schema \u2014 JSON object

\`\`\`json
{
  "patterns": [
    {
      "name": "pattern_name_snake_case",
      "type": "canonical" | "candidate",
      "description": "Consequence-framed, plain-English, 1-2 sentences. The reader understands why this matters, not just what you observed.",
      "evidence": {
        "signals": ["signal_id.domain", ...],
        "events": ["event_id", ...],
        "cited_invariant": "invariant_name_or_null"
      },
      "confidence": 0.0 to 1.0
    }
  ],
  "meaning": "3-5 sentences. Weave the patterns into ONE strategic thesis. Compress. The reader should finish this paragraph and understand the one thing that matters most in this read. Plain English \u2014 no system jargon.",
  "move": "1-3 direct imperatives, OR explicit 'nothing to act on' if the read is healthy. Do not fabricate urgency. Examples: 'Force cross-module ownership this sprint.' / 'Nothing's broken. Keep shipping.' / 'If you want future reads to track this pattern by name, add a line to your strategy file.'"
}
\`\`\`

## Hard rules

- Every signal you cite MUST appear in the signal matrix above
- Every event you cite MUST appear in the events sample above
- Do not invent signals or events that aren't in the data
- Candidate patterns must have type "candidate"
- No hedging, no hype vocabulary
- Apply jargon translation before output
- Health-is-valid \u2014 don't invent problems
- Return ONLY the JSON object, no other text

Do NOT use these phrases anywhere in your output:
${forbiddenList}`;
}
1217
// Render the signal matrix as markdown grouped by domain.
// "\u2713" marks signals that clear the default evidence gate (>= 3 events at
// >= 0.5 confidence); "\u25CB" marks those that don't. Thresholds mirror
// DEFAULT_EVIDENCE_GATE but are inlined here.
function formatSignalSummary(signals) {
  const lines = [];
  const domains = ["life", "cyber", "joint"];
  for (const domain of domains) {
    const domainSignals = signals.filter((s) => s.domain === domain);
    // Skip domains with no signals entirely — no empty headings.
    if (domainSignals.length === 0) continue;
    lines.push(`### ${domain}`);
    for (const s of domainSignals) {
      const gate = s.eventCount >= 3 && s.confidence >= 0.5 ? "\u2713" : "\u25CB";
      lines.push(
        `  ${gate} ${s.id}: score=${s.score.toFixed(1)}, events=${s.eventCount}, conf=${s.confidence.toFixed(2)}`
      );
    }
  }
  return lines.join("\n");
}
1233
// Render the most recent `maxEvents` classified events as a markdown list.
// Content is truncated to 200 chars per event to bound prompt size; the
// respondsTo link is surfaced so the model can cite interactions.
function formatEventSample(events, maxEvents) {
  // slice(-n) keeps the tail — events arrive sorted oldest-first.
  const sample = events.slice(-maxEvents);
  return sample.map((e) => {
    const content = (e.event.content ?? "").slice(0, 200);
    const respondsTo = e.event.respondsTo ? ` (responds to ${e.event.respondsTo.eventId})` : "";
    return `- [${e.domain}] ${e.event.id} | ${e.event.actor.kind}:${e.event.actor.id} | ${e.event.kind ?? "event"}${respondsTo}
  "${content}"`;
  }).join("\n");
}
1242
// Parse the model's JSON read, tolerating surrounding prose.
// Strategy: grab the outermost object containing "patterns"; if that fails to
// parse, fall back to the first JSON array in the response. Anything
// unparseable degrades to an empty pattern list rather than throwing.
function parseInterpretation(raw, canonicalNames) {
  let meaning = "";
  let move = "";
  let patternsArray = [];
  // Greedy match: widest {...} span mentioning "patterns".
  const objMatch = raw.match(/\{[\s\S]*"patterns"[\s\S]*\}/);
  if (objMatch) {
    try {
      const obj = JSON.parse(objMatch[0]);
      if (Array.isArray(obj.patterns)) {
        patternsArray = obj.patterns;
      }
      if (typeof obj.meaning === "string") meaning = obj.meaning;
      if (typeof obj.move === "string") move = obj.move;
    } catch {
    }
  }
  // Fallback: the model returned a bare array instead of the full object.
  if (patternsArray.length === 0) {
    const arrMatch = raw.match(/\[[\s\S]*\]/);
    if (arrMatch) {
      try {
        const arr = JSON.parse(arrMatch[0]);
        if (Array.isArray(arr)) patternsArray = arr;
      } catch {
      }
    }
  }
  // A pattern counts as canonical when the model says so OR its name matches
  // a name the organization already declared (case-insensitive).
  const canonicalSet = new Set(canonicalNames.map((n) => n.toLowerCase()));
  const patterns = [];
  for (const item of patternsArray) {
    if (!isPatternLike(item)) continue;
    const nameStr = String(item.name ?? "unnamed");
    const ev = item.evidence;
    const isCanonical = item.type === "canonical" || canonicalSet.has(nameStr.toLowerCase());
    patterns.push({
      name: nameStr,
      type: isCanonical ? "canonical" : "candidate",
      declaredAs: isCanonical ? nameStr : void 0,
      description: String(item.description ?? ""),
      evidence: {
        signals: Array.isArray(ev?.signals) ? ev.signals.map(String) : [],
        events: Array.isArray(ev?.events) ? ev.events.map(String) : [],
        cited_invariant: ev?.cited_invariant ? String(ev.cited_invariant) : void 0
      },
      // Clamp model-reported confidence into [0, 1]; default 0.5 when absent.
      confidence: typeof item.confidence === "number" ? Math.max(0, Math.min(1, item.confidence)) : 0.5
    });
  }
  return { patterns, meaning, move };
}
1290
// Loose structural check: anything object-like that carries a `name` key can
// be coerced into a pattern entry by parseInterpretation.
function isPatternLike(x) {
  if (x === null) return false;
  if (typeof x !== "object") return false;
  return "name" in x;
}
1293
+
1294
+ // src/radiant/core/renderer.ts
1295
+ function render(input) {
1296
+ const text = renderText(input);
1297
+ const frontmatter = renderFrontmatter(input);
1298
+ return { text, frontmatter };
1299
+ }
1300
// Render the human-readable read: header, EMERGENT patterns (canonical first,
// then candidates), MEANING, MOVE, ALIGNMENT scores, and the DEPTH footer.
// Sections are separated by blank lines.
function renderText(input) {
  const sections = [];
  sections.push(
    `Scope: ${formatScope(input.scope)}
Window: last ${input.windowDays} days \xB7 ${input.eventCount} events
Lens: ${input.lens.name}`
  );
  if (input.patterns.length > 0) {
    const canonical = input.patterns.filter((p) => p.type === "canonical");
    const candidates = input.patterns.filter((p) => p.type === "candidate");
    let emergentBlock = "EMERGENT\n";
    if (canonical.length > 0) {
      for (const p of canonical) {
        emergentBlock += `
  ${p.name}
`;
        emergentBlock += `    ${p.description}
`;
      }
    }
    if (candidates.length > 0) {
      emergentBlock += "\n  Emergent (candidates \u2014 not yet in worldmodel)\n";
      for (const p of candidates) {
        emergentBlock += `
  ${p.name} (candidate)
`;
        emergentBlock += `    ${p.description}
`;
        // Only candidates can cite a worldmodel invariant worth surfacing.
        if (p.evidence.cited_invariant) {
          emergentBlock += `    Cited invariant: ${p.evidence.cited_invariant}
`;
        }
      }
    }
    sections.push(emergentBlock.trimEnd());
  }
  if (input.meaning) {
    sections.push(`MEANING

  ${input.meaning.split("\n").join("\n  ")}`);
  }
  if (input.move) {
    sections.push(`MOVE

  ${input.move.split("\n").join("\n  ")}`);
  }
  const alignBlock = [
    "ALIGNMENT",
    "",
    `  Human work: ${formatScore(input.scores.A_L)}`,
    `  AI work: ${formatScore(input.scores.A_C)}`,
    `  Human\u2013AI collaboration: ${formatScore(input.scores.A_N)}`,
    `  Composite: ${formatScore(input.scores.R)}`
  ].join("\n");
  sections.push(alignBlock);
  sections.push(renderDepth(input.priorReadCount ?? 0, input.windowDays));
  return sections.join("\n\n");
}
1358
// DEPTH footer: tells the reader which analysis tiers are unlocked at the
// current read count. Tiers: 0 prior reads = first read (no baseline),
// 1-3 = baseline forming (drift detection available), 4+ = established.
function renderDepth(priorReads, windowDays) {
  if (priorReads === 0) {
    return [
      "DEPTH",
      "",
      `  This is your first read. Radiant sees ${windowDays} days of activity`,
      "  but has no prior baseline to compare against.",
      "",
      "  Available now:",
      "  \u2713 Signal extraction across life / cyber / joint domains",
      "  \u2713 Pattern identification (canonical + candidates)",
      "  \u2713 Alignment scoring",
      "",
      "  Available after 2+ reads:",
      "  \xB7 Drift detection (is alignment improving or degrading?)",
      '  \xB7 Baselines (what does "normal" look like for this team?)',
      "  \xB7 Pattern confidence (are these patterns persistent or noise?)",
      "  \xB7 Evolution proposals (should the worldmodel adapt?)",
      "",
      "  Run again next week. The read gets sharper every time."
    ].join("\n");
  }
  if (priorReads < 4) {
    return [
      "DEPTH",
      "",
      `  Read ${priorReads + 1} of this scope. Baseline forming.`,
      "",
      "  Available now:",
      "  \u2713 Signal extraction + pattern identification + alignment scoring",
      `  \u2713 Drift detection (comparing against ${priorReads} prior read${priorReads > 1 ? "s" : ""})`,
      "  \xB7 Baselines stabilizing (need 4+ reads for reliable averages)",
      "  \xB7 Pattern confidence accumulating",
      "",
      "  The read sharpens with each run."
    ].join("\n");
  }
  return [
    "DEPTH",
    "",
    `  Read ${priorReads + 1} of this scope. Baseline established.`,
    "",
    "  Available:",
    "  \u2713 Signal extraction + pattern identification + alignment scoring",
    "  \u2713 Drift detection against established baseline",
    "  \u2713 Pattern confidence (persistent vs noise)",
    "  \u2713 Evolution proposals (candidate patterns with enough history to evaluate)"
  ].join("\n");
}
1407
// Human-readable rendering of a score slot. Sentinel values get plain-English
// explanations; numeric scores are rounded and labeled by band:
// >=75 STRONG, >=60 STABLE, >=45 needs attention, >=30 concerning, else critical.
function formatScore(s) {
  if (!isScored(s)) {
    return s === "UNAVAILABLE"
      ? "not available (no worldmodel loaded)"
      : "not enough signal to call yet";
  }
  const n = Math.round(s);
  const label =
    n >= 75 ? "STRONG"
    : n >= 60 ? "STABLE"
    : n >= 45 ? "needs attention"
    : n >= 30 ? "concerning"
    : "critical";
  return `${n} \xB7 ${label}`;
}
1421
// Render the machine-readable YAML frontmatter block ("---" ... "---").
// Numeric scores are rounded; sentinel scores are emitted as their string
// names so downstream tooling can distinguish them from real zeros.
function renderFrontmatter(input) {
  const now = (/* @__PURE__ */ new Date()).toISOString();
  const signalsByDomain = groupSignalsByDomain(input.signals);
  const patternEntries = input.patterns.map((p) => {
    const entry = {
      name: p.name,
      type: p.type,
      conf: Number(p.confidence.toFixed(2)),
      evidence_signals: p.evidence.signals,
      evidence_events: p.evidence.events
    };
    // cited_invariant is optional — only serialize it when present.
    if (p.evidence.cited_invariant) {
      entry.cited_invariant = p.evidence.cited_invariant;
    }
    return entry;
  });
  const frontmatter = {
    radiant_read: {
      scope: formatScope(input.scope),
      window: `${input.windowDays}d`,
      timestamp: now,
      lens: input.lens.name
    },
    events: {
      total: input.eventCount
    },
    signals: signalsByDomain,
    scores: {
      A_L: isScored(input.scores.A_L) ? Math.round(input.scores.A_L) : String(input.scores.A_L),
      A_C: isScored(input.scores.A_C) ? Math.round(input.scores.A_C) : String(input.scores.A_C),
      A_N: isScored(input.scores.A_N) ? Math.round(input.scores.A_N) : String(input.scores.A_N),
      R: isScored(input.scores.R) ? Math.round(input.scores.R) : String(input.scores.R)
    },
    patterns: patternEntries
  };
  return "---\n" + serializeYAML(frontmatter) + "---";
}
1458
// Group signals into { domain: { signalId: { score, n, conf } } } for the
// frontmatter. Scores are rounded to 1 decimal, confidence to 2.
function groupSignalsByDomain(signals) {
  const byDomain = {};
  for (const signal of signals) {
    if (!byDomain[signal.domain]) {
      byDomain[signal.domain] = {};
    }
    byDomain[signal.domain][signal.id] = {
      score: Number(signal.score.toFixed(1)),
      n: signal.eventCount,
      conf: Number(signal.confidence.toFixed(2))
    };
  }
  return byDomain;
}
1470
// Minimal hand-rolled YAML emitter for the frontmatter (no dependency).
// Strings are JSON-quoted (valid YAML scalars); arrays of scalars use flow
// style ([a, b]); objects and arrays of objects use block style. This is NOT
// a general YAML serializer — it only needs to handle the shapes
// renderFrontmatter builds. Every branch returns text ending in "\n".
function serializeYAML(obj, indent = 0) {
  const pad = " ".repeat(indent);
  if (obj === null || obj === void 0) return "null\n";
  if (typeof obj === "string") return `${JSON.stringify(obj)}
`;
  if (typeof obj === "number" || typeof obj === "boolean") return `${obj}
`;
  if (Array.isArray(obj)) {
    if (obj.length === 0) return "[]\n";
    // All-scalar arrays are emitted inline in flow style.
    if (obj.every((item) => typeof item === "string" || typeof item === "number")) {
      return `[${obj.map((item) => JSON.stringify(item)).join(", ")}]
`;
    }
    let result = "\n";
    for (const item of obj) {
      if (typeof item === "object" && item !== null && !Array.isArray(item)) {
        // First key rides on the "- " line; remaining keys are indented under it.
        // NOTE(review): the first value is serialized with indent 0 and
        // trimmed — this assumes pattern entries' first value is a scalar.
        const entries = Object.entries(item);
        result += `${pad}- ${entries[0][0]}: ${serializeYAML(entries[0][1], 0).trim()}
`;
        for (let i = 1; i < entries.length; i++) {
          result += `${pad}  ${entries[i][0]}: ${serializeYAML(entries[i][1], indent + 2).trim()}
`;
        }
      } else {
        result += `${pad}- ${serializeYAML(item, indent + 1).trim()}
`;
      }
    }
    return result;
  }
  if (typeof obj === "object") {
    const entries = Object.entries(obj);
    if (entries.length === 0) return "{}\n";
    let result = "\n";
    for (const [key, value] of entries) {
      if (typeof value === "object" && value !== null) {
        // Nested containers start on the next line (the recursive call's
        // leading "\n" supplies the break).
        result += `${pad}${key}:${serializeYAML(value, indent + 1)}`;
      } else {
        result += `${pad}${key}: ${serializeYAML(value, indent).trim()}
`;
      }
    }
    return result;
  }
  // Fallback for anything else (e.g. bigint/symbol would stringify here).
  return `${obj}
`;
}
1517
+
1518
+ // src/radiant/commands/emergent.ts
1519
// End-to-end "emergent" read: fetch GitHub activity for the scope, classify
// each event's actor domain, extract signals, have the AI interpret patterns
// through the lens, then render the text report + YAML frontmatter and audit
// the result against the lens's voice contract.
async function emergent(input) {
  const lens = resolveLens2(input.lensId);
  const windowDays = input.windowDays ?? 14;
  let statedIntent;
  let exocortexContext;
  // Optional stated-intent context loaded from local exocortex files.
  // NOTE(review): exocortexContext itself is not used after formatting —
  // confirm whether it was meant to be returned/logged.
  if (input.exocortexPath) {
    exocortexContext = readExocortex(input.exocortexPath);
    const formatted = formatExocortexForPrompt(exocortexContext);
    if (formatted) statedIntent = formatted;
  }
  const events = await fetchGitHubActivity(input.scope, input.githubToken, {
    windowDays
  });
  const classified = classifyEvents(events);
  const signals = extractSignals(classified);
  // A_N is only meaningful when a worldmodel was actually provided.
  const scores = computeScores(signals, input.worldmodelContent !== "");
  const { patterns, meaning, move } = await interpretPatterns({
    signals,
    events: classified,
    worldmodelContent: input.worldmodelContent,
    lens,
    ai: input.ai,
    canonicalPatterns: input.canonicalPatterns,
    statedIntent
  });
  // Let the lens re-frame each pattern, then audit the combined descriptions
  // for phrases the lens forbids.
  const rewrittenPatterns = patterns.map((p) => lens.rewrite(p));
  const allDescriptions = rewrittenPatterns.map((p) => p.description).join("\n");
  const voiceViolations = checkForbiddenPhrases(lens, allDescriptions);
  const rendered = render({
    scope: input.scope,
    windowDays,
    eventCount: events.length,
    signals,
    patterns: rewrittenPatterns,
    scores,
    lens,
    // Empty strings collapse to undefined so the renderer skips the section.
    meaning: meaning || void 0,
    move: move || void 0
  });
  return {
    text: rendered.text,
    frontmatter: rendered.frontmatter,
    voiceViolations,
    voiceClean: voiceViolations.length === 0,
    signals,
    scores,
    eventCount: events.length
  };
}
1568
// Fold the per-domain signals into the four alignment scores:
// A_L (life/human), A_C (cyber/machine), A_N (joint/bridging, only when a
// worldmodel is loaded), and the composite R.
function computeScores(signals, worldmodelLoaded) {
  const gate = DEFAULT_EVIDENCE_GATE;
  const lifeSignals = signals.filter((s) => s.domain === "life");
  const A_L = scoreLife(
    { dimensions: lifeSignals.map(signalToDimension) },
    gate
  );
  const cyberSignals = signals.filter((s) => s.domain === "cyber");
  const A_C = scoreCyber(
    { dimensions: cyberSignals.map(signalToDimension) },
    gate
  );
  const jointSignals = signals.filter((s) => s.domain === "joint");
  const A_N = scoreNeuroVerse(
    jointSignals.map(signalToBridging),
    worldmodelLoaded,
    gate
  );
  const R = scoreComposite(A_L, A_C, A_N);
  return { A_L, A_C, A_N, R };
}
1589
// Re-shape a signal row into the dimension record the math layer scores.
function signalToDimension(s) {
  const { id, score, eventCount, confidence } = s;
  return { id, score, eventCount, confidence };
}
// Joint-domain signals stand in for the ALIGN bridging component — Radiant
// has no direct bridging instrumentation yet, so this is a proxy mapping.
function signalToBridging(s) {
  const { score, eventCount, confidence } = s;
  return {
    component: "ALIGN",
    score,
    eventCount,
    confidence
  };
}
1606
// Look up a lens by id in the registry; throw a descriptive error when the
// id is unknown (rather than letting callers hit an undefined lens later).
function resolveLens2(id) {
  const found = getLens(id);
  if (found) {
    return found;
  }
  throw new Error(
    `Lens "${id}" not found. Check the id or register the lens.`
  );
}
1615
+
1616
+ // src/radiant/core/ai.ts
1617
// Build an AI client backed by the Anthropic Messages API.
// The returned object exposes `complete(systemPrompt, userQuery)`, which
// resolves to the concatenated text content of the model response.
// Throws on non-2xx HTTP responses (message includes status and up to the
// first 500 chars of the body) and when the response has no text content.
function createAnthropicAI(apiKey, model = "claude-sonnet-4-20250514", maxTokens = 4096) {
  const endpoint = "https://api.anthropic.com/v1/messages";
  const headers = {
    "x-api-key": apiKey,
    "anthropic-version": "2023-06-01",
    "content-type": "application/json"
  };
  const complete = async (systemPrompt, userQuery) => {
    const payload = {
      model,
      max_tokens: maxTokens,
      system: systemPrompt,
      messages: [{ role: "user", content: userQuery }]
    };
    const res = await fetch(endpoint, {
      method: "POST",
      headers,
      body: JSON.stringify(payload)
    });
    if (!res.ok) {
      const body = await res.text();
      throw new Error(
        `Anthropic API error ${res.status}: ${body.slice(0, 500)}`
      );
    }
    const data = await res.json();
    // Concatenate every text-type content block; non-text blocks are ignored.
    const pieces = [];
    for (const part of data.content ?? []) {
      if (part.type === "text") {
        pieces.push(part.text ?? "");
      }
    }
    const text = pieces.join("");
    if (!text) {
      throw new Error("Anthropic returned no text content");
    }
    return text;
  };
  return { complete };
}
1649
+
1650
// src/cli/radiant.ts
// Module init: (re-)register the built-in lenses before any CLI work
// (bundler-generated lazy-init shim).
init_lenses();
// ANSI escape sequences for terminal styling of CLI output.
var RED = "\x1B[31m";
var DIM = "\x1B[2m";
var BOLD = "\x1B[1m";
var YELLOW = "\x1B[33m";
var RESET = "\x1B[0m";
// Help text shown for --help / missing subcommand.
// NOTE(review): column alignment inside this template literal was lost in
// the diff rendering — confirm spacing against the published package.
var USAGE = `
${BOLD}neuroverse radiant${RESET} \u2014 behavioral intelligence for collaboration systems

${BOLD}Stage A (voice layer):${RESET}
  think      Send a query through the worldmodel + lens \u2192 AI-framed response

${BOLD}Stage B (behavioral analysis, coming soon):${RESET}
  emergent   Pattern read on recent activity
  decision   Evaluate a specific artifact against the worldmodel
  signals    Extract signal matrix (debug)
  lenses     List or describe available rendering lenses

${BOLD}Usage:${RESET}
  neuroverse radiant think --lens auki-builder --worlds ./worlds/ --query "What is our biggest risk?"
  neuroverse radiant think --lens auki-builder --worlds ./worlds/ < prompt.txt
  neuroverse radiant emergent aukiverse/posemesh --lens auki-builder --worlds ./worlds/
  neuroverse radiant emergent aukiverse/posemesh --lens auki-builder --worlds ./worlds/ --exocortex ~/exocortex/
  neuroverse radiant lenses list
  neuroverse radiant lenses describe auki-builder

${BOLD}Environment:${RESET}
  ANTHROPIC_API_KEY   Required for AI commands (think, emergent, decision)
  RADIANT_WORLDS      Default worlds directory (overridden by --worlds)
  RADIANT_LENS        Default lens id (overridden by --lens)
  RADIANT_MODEL       AI model override (default: claude-sonnet-4-20250514)
  RADIANT_EXOCORTEX   Default exocortex directory (overridden by --exocortex)
`.trim();
1684
// Parse raw CLI argv into a structured options object.
// The first token is taken as the subcommand only when it does not start
// with "-". Value flags consume the following token (undefined when the
// flag is last); unknown tokens accumulate in `rest` as positionals.
function parseArgs(argv) {
  const result = {
    subcommand: void 0,
    lens: void 0,
    worlds: void 0,
    query: void 0,
    model: void 0,
    exocortex: void 0,
    json: false,
    help: false,
    rest: []
  };
  // Flags that take a value, mapped to their result key.
  const valueFlags = {
    "--lens": "lens",
    "--worlds": "worlds",
    "--query": "query",
    "--model": "model",
    "--exocortex": "exocortex"
  };
  let cursor = 0;
  if (argv.length > 0 && !argv[0].startsWith("-")) {
    result.subcommand = argv[0];
    cursor = 1;
  }
  for (; cursor < argv.length; cursor++) {
    const token = argv[cursor];
    if (Object.hasOwn(valueFlags, token)) {
      // Consume the next token as the flag's value.
      result[valueFlags[token]] = argv[++cursor];
    } else if (token === "--json") {
      result.json = true;
    } else if (token === "--help" || token === "-h") {
      result.help = true;
    } else {
      result.rest.push(token);
    }
  }
  return result;
}
1734
// Load worldmodel markdown from a file or directory path.
// A file path returns its contents verbatim. A directory is scanned
// (non-recursively) for *.worldmodel.md / *.nv-world.md files, which are
// concatenated in sorted order, each prefixed with an HTML comment naming
// its source file and separated by markdown horizontal rules.
// Throws when the path is missing, is neither file nor directory, or the
// directory contains no matching files.
function loadWorldmodelContent(worldsPath) {
  const absolute = (0, import_path2.resolve)(worldsPath);
  if (!(0, import_fs2.existsSync)(absolute)) {
    throw new Error(`Worlds path not found: ${absolute}`);
  }
  const info = (0, import_fs2.statSync)(absolute);
  if (info.isFile()) {
    return (0, import_fs2.readFileSync)(absolute, "utf-8");
  }
  if (!info.isDirectory()) {
    // e.g. FIFO, socket, device node.
    throw new Error(`Worlds path is neither a file nor a directory: ${absolute}`);
  }
  const isWorldFile = (f) => (0, import_path2.extname)(f) === ".md" && (f.endsWith(".worldmodel.md") || f.endsWith(".nv-world.md"));
  const names = (0, import_fs2.readdirSync)(absolute).filter(isWorldFile).sort();
  if (names.length === 0) {
    throw new Error(
      `No .worldmodel.md or .nv-world.md files found in ${absolute}`
    );
  }
  const sections = [];
  for (const name of names) {
    const body = (0, import_fs2.readFileSync)((0, import_path2.join)(absolute, name), "utf-8");
    sections.push(`<!-- worldmodel: ${name} -->\n${body}`);
  }
  return sections.join("\n\n---\n\n");
}
1760
// CLI entry for `radiant think` (Stage A voice layer): resolves lens,
// worldmodel path, and API key; reads the query from --query, trailing
// args, or piped stdin; runs the `think` pipeline and prints the AI-framed
// response. Exit codes: 1 = missing config/input, 2 = voice violations.
async function cmdThink(args) {
  // Lens id: --lens flag wins over the RADIANT_LENS env var.
  const lensId = args.lens ?? process.env.RADIANT_LENS;
  if (!lensId) {
    process.stderr.write(
      `${RED}Error:${RESET} --lens <id> or RADIANT_LENS required.
${DIM}Available lenses: ${listLenses().join(", ")}${RESET}
`
    );
    process.exit(1);
  }
  const worldsPath = args.worlds ?? process.env.RADIANT_WORLDS;
  if (!worldsPath) {
    process.stderr.write(
      `${RED}Error:${RESET} --worlds <dir> or RADIANT_WORLDS required.
`
    );
    process.exit(1);
  }
  const apiKey = process.env.ANTHROPIC_API_KEY;
  if (!apiKey) {
    process.stderr.write(
      `${RED}Error:${RESET} ANTHROPIC_API_KEY environment variable not set.
${DIM}Set it to your Anthropic API key to use Radiant's AI features.${RESET}
`
    );
    process.exit(1);
  }
  // Query precedence: --query, then trailing positional args, then stdin
  // (stdin only when it is a pipe, not an interactive terminal).
  let query = args.query;
  if (!query && args.rest.length > 0) {
    query = args.rest.join(" ");
  }
  if (!query && !process.stdin.isTTY) {
    // fd 0 = stdin; read the whole pipe synchronously.
    query = (0, import_fs2.readFileSync)(0, "utf-8").trim();
  }
  if (!query) {
    process.stderr.write(
      `${RED}Error:${RESET} No query provided.
${DIM}Use --query "...", pass as trailing args, or pipe via stdin.${RESET}
`
    );
    process.exit(1);
  }
  const worldmodelContent = loadWorldmodelContent(worldsPath);
  const model = args.model ?? process.env.RADIANT_MODEL;
  // `model || void 0` lets the factory default apply when model is unset or "".
  const ai = createAnthropicAI(apiKey, model || void 0);
  // Config banner goes to stderr so stdout stays clean for the response.
  process.stderr.write(
    `${DIM}Worlds: ${worldsPath}${RESET}
${DIM}Lens: ${lensId}${RESET}
${DIM}Model: ${model ?? "claude-sonnet-4-20250514 (default)"}${RESET}

`
  );
  const result = await think({
    worldmodelContent,
    lensId,
    query,
    ai
  });
  if (!result.voiceClean) {
    // Forbidden-phrase hits are reported on stderr; the response is still
    // printed below.
    process.stderr.write(
      `${YELLOW}Voice violations detected (${result.voiceViolations.length}):${RESET}
`
    );
    for (const v of result.voiceViolations) {
      process.stderr.write(
        `  ${YELLOW}\u26A0${RESET} "${v.phrase}" at offset ${v.offset}
`
      );
    }
    process.stderr.write("\n");
  }
  if (args.json) {
    process.stdout.write(
      JSON.stringify(
        {
          response: result.response,
          lens: result.lens,
          voiceClean: result.voiceClean,
          voiceViolations: result.voiceViolations
        },
        null,
        2
      ) + "\n"
    );
  } else {
    process.stdout.write(result.response + "\n");
  }
  // Distinct non-zero exit so calling scripts can detect voice violations.
  if (!result.voiceClean) {
    process.exit(2);
  }
}
1851
// CLI entry for `radiant emergent`: behavioral pattern read on a GitHub
// repo's recent activity (fixed 14-day window), framed through a lens.
// Requires a <owner/repo> positional scope plus lens, worlds path,
// ANTHROPIC_API_KEY, and GITHUB_TOKEN; exocortex context is optional.
// Exits 1 on any missing configuration.
async function cmdEmergent(args) {
  const scopeStr = args.rest[0];
  if (!scopeStr) {
    process.stderr.write(
      `${RED}Error:${RESET} Scope required. Usage: neuroverse radiant emergent <owner/repo>
`
    );
    process.exit(1);
  }
  const scope = parseRepoScope(scopeStr);
  // Lens id: --lens flag wins over the RADIANT_LENS env var.
  const lensId = args.lens ?? process.env.RADIANT_LENS;
  if (!lensId) {
    process.stderr.write(
      `${RED}Error:${RESET} --lens <id> or RADIANT_LENS required.
${DIM}Available lenses: ${listLenses().join(", ")}${RESET}
`
    );
    process.exit(1);
  }
  const worldsPath = args.worlds ?? process.env.RADIANT_WORLDS;
  if (!worldsPath) {
    process.stderr.write(
      `${RED}Error:${RESET} --worlds <dir> or RADIANT_WORLDS required.
`
    );
    process.exit(1);
  }
  const anthropicKey = process.env.ANTHROPIC_API_KEY;
  if (!anthropicKey) {
    process.stderr.write(
      `${RED}Error:${RESET} ANTHROPIC_API_KEY environment variable not set.
`
    );
    process.exit(1);
  }
  const githubToken = process.env.GITHUB_TOKEN;
  if (!githubToken) {
    process.stderr.write(
      `${RED}Error:${RESET} GITHUB_TOKEN environment variable not set.
${DIM}Set it to a GitHub PAT with repo read access.${RESET}
`
    );
    process.exit(1);
  }
  const worldmodelContent = loadWorldmodelContent(worldsPath);
  const model = args.model ?? process.env.RADIANT_MODEL;
  // `model || void 0` lets the factory default apply when model is unset or "".
  const ai = createAnthropicAI(anthropicKey, model || void 0);
  // Optional exocortex context; status string is shown in the banner below.
  const exocortexPath = args.exocortex ?? process.env.RADIANT_EXOCORTEX;
  let exocortexStatus = "not loaded";
  if (exocortexPath) {
    const ctx = readExocortex(exocortexPath);
    exocortexStatus = summarizeExocortex(ctx);
  }
  // Progress banner on stderr; stdout is reserved for the rendered result.
  process.stderr.write(
    `${DIM}Scope: ${scope.owner}/${scope.repo}${RESET}
${DIM}Lens: ${lensId}${RESET}
${DIM}Model: ${model ?? "claude-sonnet-4-20250514 (default)"}${RESET}
${DIM}ExoCortex: ${exocortexStatus}${RESET}
${DIM}Fetching activity...${RESET}

`
  );
  const result = await emergent({
    scope,
    githubToken,
    worldmodelContent,
    lensId,
    ai,
    windowDays: 14,
    exocortexPath: exocortexPath || void 0
  });
  if (!result.voiceClean) {
    // Forbidden-phrase hits are reported on stderr; output still prints below.
    process.stderr.write(
      `${YELLOW}Voice violations (${result.voiceViolations.length}):${RESET}
`
    );
    for (const v of result.voiceViolations) {
      process.stderr.write(
        `  ${YELLOW}\u26A0${RESET} "${v.phrase}" at offset ${v.offset}
`
      );
    }
    process.stderr.write("\n");
  }
  if (args.json) {
    process.stdout.write(
      JSON.stringify(
        {
          text: result.text,
          frontmatter: result.frontmatter,
          scores: result.scores,
          eventCount: result.eventCount,
          voiceClean: result.voiceClean
        },
        null,
        2
      ) + "\n"
    );
  } else {
    process.stdout.write(result.text + "\n");
  }
}
1953
// CLI entry for `radiant lenses`: `list` (the default) prints registered
// lens ids one per line; `describe <id>` prints a lens's frame, vocabulary,
// and guardrail summary. Exits 1 on unknown subcommand or unknown lens id.
async function cmdLenses(args) {
  const subSub = args.rest[0];
  if (!subSub || subSub === "list") {
    const ids = listLenses();
    if (ids.length === 0) {
      process.stdout.write("No lenses registered.\n");
    } else {
      for (const id of ids) {
        process.stdout.write(`${id}
`);
      }
    }
    return;
  }
  if (subSub === "describe") {
    // Lazy-load the lens registry module (bundler dynamic-import shim).
    const { getLens: getLens2 } = await Promise.resolve().then(() => (init_lenses(), lenses_exports));
    const id = args.rest[1];
    if (!id) {
      process.stderr.write(`${RED}Error:${RESET} Lens id required.
`);
      process.exit(1);
    }
    const lens = getLens2(id);
    if (!lens) {
      process.stderr.write(
        `${RED}Error:${RESET} Lens "${id}" not found.
${DIM}Available: ${listLenses().join(", ")}${RESET}
`
      );
      process.exit(1);
    }
    // Human-readable lens summary on stdout.
    process.stdout.write(`${BOLD}${lens.name}${RESET}
`);
    process.stdout.write(`${lens.description}

`);
    process.stdout.write(
      `${BOLD}Domains:${RESET} ${lens.primary_frame.domains.join(", ")}
`
    );
    process.stdout.write(
      `${BOLD}Overlaps:${RESET} ${lens.primary_frame.overlaps.map((o) => o.emergent_state).join(", ")}
`
    );
    process.stdout.write(
      `${BOLD}Center:${RESET} ${lens.primary_frame.center_identity}
`
    );
    process.stdout.write(
      `${BOLD}Forbidden phrases:${RESET} ${lens.forbidden_phrases.length}
`
    );
    process.stdout.write(
      `${BOLD}Vocabulary terms:${RESET} ${lens.vocabulary.proper_nouns.length} proper nouns, ${Object.keys(lens.vocabulary.preferred).length} substitutions
`
    );
    process.stdout.write(
      `${BOLD}Exemplars:${RESET} ${lens.exemplar_refs.length}
`
    );
    return;
  }
  process.stderr.write(
    `${RED}Error:${RESET} Unknown lenses subcommand "${subSub}".
${DIM}Use: lenses list | lenses describe <id>${RESET}
`
  );
  process.exit(1);
}
2022
// Top-level dispatcher for the `neuroverse radiant` CLI.
// No subcommand (or --help) prints usage and succeeds; recognized but
// unimplemented subcommands exit 1 with a notice; unknown subcommands
// exit 1 with an error plus usage.
async function main(argv) {
  const parsed = parseArgs(argv);
  if (parsed.help || !parsed.subcommand) {
    process.stdout.write(USAGE + "\n");
    return;
  }
  const sub = parsed.subcommand;
  if (sub === "think") {
    return cmdThink(parsed);
  }
  if (sub === "lenses") {
    return cmdLenses(parsed);
  }
  if (sub === "emergent") {
    return cmdEmergent(parsed);
  }
  // Reserved Stage B subcommands that are not implemented yet.
  if (sub === "decision" || sub === "signals" || sub === "drift" || sub === "evolve") {
    process.stderr.write(
      `${DIM}neuroverse radiant ${sub} is not yet implemented.${RESET}` + "\n"
    );
    process.exit(1);
  }
  process.stderr.write(
    `${RED}Unknown radiant subcommand: "${sub}"${RESET}` + "\n\n"
  );
  process.stdout.write(USAGE + "\n");
  process.exit(1);
}
2055
// Annotate the CommonJS export names for ESM import in node:
// (`0 &&` makes this dead code at runtime — esbuild emits it so Node's
// cjs-module-lexer can statically detect the named exports.)
0 && (module.exports = {
  main
});