@neuroverseos/governance 0.8.0 → 0.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -40,10 +40,15 @@ __export(radiant_exports, {
  classifyActorDomain: () => classifyActorDomain,
  classifyEvents: () => classifyEvents,
  composeSystemPrompt: () => composeSystemPrompt,
+ compressExocortex: () => compressExocortex,
+ compressLens: () => compressLens,
+ compressPriorReads: () => compressPriorReads,
+ compressWorldmodel: () => compressWorldmodel,
  computePersistence: () => computePersistence,
  createAnthropicAI: () => createAnthropicAI,
  createMockAI: () => createMockAI,
  createMockGitHubAdapter: () => createMockGitHubAdapter,
+ discoverWorlds: () => discoverWorlds,
  emergent: () => emergent,
  extractSignals: () => extractSignals,
  fetchDiscordActivity: () => fetchDiscordActivity,
@@ -51,6 +56,7 @@ __export(radiant_exports, {
  fetchGitHubOrgActivity: () => fetchGitHubOrgActivity,
  fetchNotionActivity: () => fetchNotionActivity,
  fetchSlackActivity: () => fetchSlackActivity,
+ formatActiveWorlds: () => formatActiveWorlds,
  formatDiscordSignalsForPrompt: () => formatDiscordSignalsForPrompt,
  formatExocortexForPrompt: () => formatExocortexForPrompt,
  formatNotionSignalsForPrompt: () => formatNotionSignalsForPrompt,
@@ -75,6 +81,7 @@ __export(radiant_exports, {
  scoreCyber: () => scoreCyber,
  scoreLife: () => scoreLife,
  scoreNeuroVerse: () => scoreNeuroVerse,
+ sovereignConduitLens: () => sovereignConduitLens,
  summarizeExocortex: () => summarizeExocortex,
  think: () => think,
  updateKnowledge: () => updateKnowledge,
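
The new exports in 0.8.1 sit alongside the existing public surface. A minimal import sketch (the export names come from the diff above; the import style is an assumption, since the bundle shown here is CJS):

```ts
// Sketch only: names confirmed by the export list above, everything else illustrative.
import {
  compressWorldmodel,
  compressExocortex,
  compressLens,
  compressPriorReads,
  discoverWorlds,
  formatActiveWorlds,
  sovereignConduitLens,
} from "@neuroverseos/governance";
```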
@@ -564,9 +571,290 @@ var aukiBuilderLens = {
  rewrite: aukiBuilderRewrite
  };

+ // src/radiant/lenses/sovereign-conduit.ts
+ var SOVEREIGN_CONDUIT_FRAME = {
+ domains: [
+ "stewardship",
+ "sovereignty",
+ "integration"
+ ],
+ overlaps: [
+ {
+ domains: ["stewardship", "sovereignty"],
+ emergent_state: "Trust",
+ description: "I am safe to be myself. When the system protects AND preserves individual authority, trust emerges \u2014 the feeling that you can think freely because someone is watching the boundaries."
+ },
+ {
+ domains: ["sovereignty", "integration"],
+ emergent_state: "Possibility",
+ description: "My thinking can expand. When individual authority is preserved AND AI extends cognitive capability, possibility opens \u2014 the feeling that you can reach further without losing yourself."
+ },
+ {
+ domains: ["integration", "stewardship"],
+ emergent_state: "Responsibility",
+ description: "Power is used with care. When AI extends capability AND the system protects integrity, responsibility emerges \u2014 the feeling that expansion comes with guardrails, not recklessness."
+ }
+ ],
+ center_identity: "The Sovereign Conduit",
+ evaluation_questions: [
+ "Is the human maintaining authority over their decisions, or is decision ownership quietly shifting to the AI?",
+ "Is the AI extending thinking, or is it replacing thinking? Look for the difference: extension means the human understands and owns the output. Replacement means they accept it without engaging.",
+ "Are the boundaries between human thinking and AI output clear and visible, or are they blurring?",
+ "Is diversity of thought preserved? Are people thinking differently from each other, or is the system funneling everyone into the same patterns?",
+ "Would you feel safe letting a child learn in this system? If not, what specifically makes it unsafe?",
+ "If this felt wrong, could you leave? Is the exit real or theoretical?"
+ ],
+ scoring_rubric: "For any activity, ask: is the human still the author of their decisions? Is the AI helping them think further, or thinking for them? Are the rules of this space clear, fair, and safe \u2014 like the rules at a good friend's house? When something feels off, name the feeling first, then the mechanism. Use everyday language. If a non-technical person couldn't understand the observation, rephrase it until they could.",
+ domain_skills: {
+ "stewardship": [
+ "boundary setting",
+ "risk awareness",
+ "ethical judgment",
+ "system protection",
+ "conflict stabilization",
+ "responsibility signaling",
+ "harm detection",
+ "constraint design"
+ ],
+ "sovereignty": [
+ "independent thinking",
+ "decision ownership",
+ "self-trust",
+ "value clarity",
+ "cognitive resistance",
+ "identity anchoring",
+ "perspective holding",
+ "authentic expression"
+ ],
+ "integration": [
+ "AI collaboration",
+ "cognitive expansion",
+ "prompt framing",
+ "insight synthesis",
+ "signal interpretation",
+ "tool fluency",
+ "co-creation",
+ "iterative thinking"
+ ]
+ },
+ output_translation: {
+ never_surface_in_output: [
+ "Stewardship",
+ "Sovereignty",
+ "Integration"
+ ],
+ surface_freely: [
+ "Trust",
+ "Possibility",
+ "Responsibility",
+ "Sovereign Conduit"
+ ],
+ translation_examples: [
+ {
+ internal_reasoning: "Stewardship is strong",
+ external_expression: "the boundaries are clear and the system feels safe to operate inside"
+ },
+ {
+ internal_reasoning: "Sovereignty is weakening",
+ external_expression: "decision ownership is quietly shifting \u2014 the human is accepting AI output without engaging with it"
+ },
+ {
+ internal_reasoning: "Integration is high but Stewardship is low",
+ external_expression: "the AI is expanding capability fast, but nobody is watching the guardrails \u2014 that's power without responsibility"
+ }
+ ]
+ }
+ };
+ var SOVEREIGN_CONDUIT_VOCABULARY = {
+ proper_nouns: [
+ "NeuroVerseOS",
+ "Sovereign Conduit",
+ "LifeOS",
+ "CyberOS",
+ "NeuroverseOS"
+ ],
+ preferred: {
+ "worldmodel": "thinking constitution",
+ "invariant": "non-negotiable rule",
+ "governance": "the rules of the space",
+ "alignment": "how well the work matches what was declared",
+ "drift": "quiet shift away from what was intended",
+ "signal": "something observable",
+ "evidence gate": "how much we need to see before we speak",
+ "actor domain": "who did the work \u2014 a person, an AI, or both together",
+ "rendering lens": "how the system speaks",
+ "candidate pattern": "something noticed but not yet named as important",
+ "cognitive liberty": "your right to think for yourself",
+ "homogenization": "everyone being funneled into the same patterns"
+ },
+ architecture: [
+ "thinking constitution",
+ "thinking space",
+ "cognitive extension",
+ "behavioral model",
+ "governance frame",
+ "world file",
+ "cocoon"
+ ],
+ economic: [],
+ framing: [
+ "humanity first",
+ "in constant learning",
+ "in shared teaching",
+ "extension not replacement",
+ "safe to think freely",
+ "the rules of this house",
+ "sovereign over your own thinking",
+ "idea calculator",
+ "Spock in your life",
+ "Jarvis in your life",
+ "funneling into fields",
+ "diversity of thought",
+ "thinking for yourself"
+ ],
+ jargon_translations: {
+ "worldmodel": "thinking constitution",
+ "invariant": "non-negotiable rule",
+ "canonical pattern": "something we're tracking by name",
+ "candidate pattern": "something noticed but not yet tracked",
+ "evidence gate": "how much we need to see before we speak up",
+ "signal extraction": "reading what happened",
+ "alignment score": "how well the work matches what was declared",
+ "actor domain": "who did this \u2014 a person, an AI, or both",
+ "presence-based averaging": "only counting what actually happened",
+ "drift detection": "noticing when things quietly shift",
+ "INSUFFICIENT_EVIDENCE": "not enough to say yet",
+ "UNAVAILABLE": "can't measure this yet",
+ "rendering lens": "how the system speaks to you"
+ }
+ };
+ var SOVEREIGN_CONDUIT_VOICE = {
+ register: "warm, accessible, teaching. Like a thoughtful parent explaining how the world works \u2014 not talking down, but making the complex feel natural. Use everyday analogies. Name emotions. If a non-technical person couldn't understand the output, it's wrong.",
+ active_voice: "preferred",
+ specificity: "preferred",
+ hype_vocabulary: "forbidden",
+ hedging: "discouraged",
+ playfulness: "allowed",
+ close_with_strategic_frame: "preferred",
+ punchline_move: "sparing",
+ honesty_about_failure: "required",
+ output_translation: `Reason internally through the three-domain frame (Stewardship, Sovereignty, Integration). Express externally through the skills inside each domain and the overlap feelings (Trust, Possibility, Responsibility). Do NOT surface the bucket names as labels. Readers understand "the boundaries feel safe" not "Stewardship is strong." Use everyday analogies \u2014 mom rules, friend's house rules, idea calculator. Name the emotion before the mechanism.`
+ };
+ var SOVEREIGN_CONDUIT_FORBIDDEN = Object.freeze([
+ // Bucket names as labels
+ "stewardship is",
+ "sovereignty is",
+ "integration is",
+ // AI-assistant hedging
+ "it may be beneficial to consider",
+ "there appears to be",
+ "one possible interpretation",
+ "it might be worth exploring",
+ "consider whether",
+ "it is worth noting",
+ // Corporate
+ "stakeholders",
+ "synergy",
+ "value proposition",
+ "paradigm shift",
+ "best-in-class",
+ "industry-leading",
+ // Cold/mechanical
+ "the system recommends",
+ "analysis suggests",
+ "data indicates",
+ "metrics show",
+ "according to the model"
+ ]);
+ var SOVEREIGN_CONDUIT_PREFERRED = Object.freeze([
+ // Everyday analogies
+ "This is like [everyday analogy]. [What it means].",
+ "Think of it like the rules at a friend's house \u2014 [application].",
+ "The boundaries are [state]. That means [feeling].",
+ // Emotion-first
+ "This feels like [emotion] because [mechanism].",
+ "Trust is [emerging/breaking] here \u2014 [specific evidence].",
+ "Possibility is opening because [evidence]. But [caveat if any].",
+ // Teaching voice
+ "Here's what's actually happening: [plain explanation].",
+ "The question to ask yourself: [question].",
+ "The difference between [A] and [B] matters here: [why].",
+ // Sovereignty checks
+ "Are you still the author of this decision, or did the AI make it for you?",
+ "The AI extended your thinking here. That's working.",
+ "The AI replaced your thinking here. That's the drift to watch.",
+ // Safety
+ "Would you feel safe letting someone learn in this environment? [Why/why not].",
+ "The exit is real \u2014 you can [specific exit path].",
+ "The exit isn't real here \u2014 [what's blocking it]."
+ ]);
+ var SOVEREIGN_CONDUIT_STRATEGIC = Object.freeze([
+ "Safety before expansion \u2014 always. No exception.",
+ "Sovereignty before convenience \u2014 the right to think for yourself is not a feature to optimize away.",
+ "Extension, not replacement \u2014 AI should make your thinking bigger, not do your thinking for you.",
+ "Diversity over uniformity \u2014 different thinkers produce different ideas, and that's the engine of progress.",
+ "The rules should be visible \u2014 like a good house, you know the rules before you walk in.",
+ "Exit must be real \u2014 if you can't leave a system that feels wrong, it's not governance, it's a cage.",
+ "Teach, don't lecture \u2014 help people think for themselves, not think what you think.",
+ "Name the feeling first \u2014 when something is off, the emotion arrives before the analysis. Trust that."
+ ]);
+ var SOVEREIGN_CONDUIT_EXEMPLARS = Object.freeze([
+ {
+ path: "neuroverseos-sovereign-conduit.worldmodel.md",
+ title: "The Sovereign Conduit Worldmodel",
+ exhibits: ["stewardship", "sovereignty", "integration"],
+ integration_quality: "full \u2014 all three domains defined, overlaps named, center identity declared",
+ notes: 'The source worldmodel. The tagline "Humanity first. In constant learning. In shared teaching." is the voice compressed to its essence. Use this as the north star for tone calibration.'
+ }
+ ]);
+ function sovereignConduitRewrite(pattern) {
+ if (pattern.evidence.cited_invariant) {
+ return {
+ ...pattern,
+ framing: "non-negotiable rule tested",
+ emphasis: "name the rule, name who it protects, name what would happen without it",
+ compress: false
+ // Sovereign Conduit is warm, not compressed
+ };
+ }
+ if (pattern.type === "candidate") {
+ return {
+ ...pattern,
+ framing: "something new noticed",
+ emphasis: "explain what was seen in everyday language, ask whether it matters",
+ compress: false
+ };
+ }
+ return {
+ ...pattern,
+ framing: "what this means for the people in the system",
+ emphasis: "humanity + sovereignty + learning",
+ compress: false
+ };
+ }
+ var sovereignConduitLens = {
+ name: "sovereign-conduit",
+ description: "The NeuroVerseOS base lens. Warm, accessible, teaching. Evaluates activity through Stewardship (safety), Sovereignty (authority over thinking), and Integration (AI as cognitive extension). Uses everyday analogies \u2014 mom rules, friend's house, idea calculator. Names emotions before mechanisms. If a non-technical person can't understand the output, the voice is wrong. Humanity first. In constant learning. In shared teaching.",
+ primary_frame: {
+ domains: SOVEREIGN_CONDUIT_FRAME.domains,
+ overlaps: SOVEREIGN_CONDUIT_FRAME.overlaps,
+ center_identity: SOVEREIGN_CONDUIT_FRAME.center_identity,
+ evaluation_questions: SOVEREIGN_CONDUIT_FRAME.evaluation_questions,
+ scoring_rubric: SOVEREIGN_CONDUIT_FRAME.scoring_rubric
+ },
+ vocabulary: SOVEREIGN_CONDUIT_VOCABULARY,
+ voice: SOVEREIGN_CONDUIT_VOICE,
+ forbidden_phrases: SOVEREIGN_CONDUIT_FORBIDDEN,
+ preferred_patterns: SOVEREIGN_CONDUIT_PREFERRED,
+ strategic_patterns: SOVEREIGN_CONDUIT_STRATEGIC,
+ exemplar_refs: SOVEREIGN_CONDUIT_EXEMPLARS,
+ rewrite: sovereignConduitRewrite
+ };
+
  // src/radiant/lenses/index.ts
  var LENSES = Object.freeze({
- "auki-builder": aukiBuilderLens
+ "auki-builder": aukiBuilderLens,
+ "sovereign-conduit": sovereignConduitLens
  });
  function getLens(id) {
  return LENSES[id];
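
A minimal sketch of driving the prompt composer with the new lens. `composeSystemPrompt` and `sovereignConduitLens` are both exported above; the worldmodel markdown here is a stand-in:

```ts
import { composeSystemPrompt, sovereignConduitLens } from "@neuroverseos/governance";

// Hypothetical worldmodel markdown; composeSystemPrompt compresses it before use
// (see the rewritten composeSystemPrompt in the next hunk).
const worldmodel = "## Mission\nExtend thinking without replacing it.\n\n### Stewardship\n- boundary setting";

const systemPrompt = composeSystemPrompt(worldmodel, sovereignConduitLens);
console.log(systemPrompt.split("\n\n---\n\n").length); // 4 sections: worldmodel, frame, voice, guardrails
```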
@@ -710,91 +998,166 @@ var DEFAULT_SIGNAL_EXTRACTORS = Object.freeze([
  DECISION_MOMENTUM_EXTRACTOR
  ]);

+ // src/radiant/core/compress.ts
+ function compressWorldmodel(content) {
+ const lines = [];
+ const missionMatch = content.match(/##\s*Mission\s*\n+(?:<!--[\s\S]*?-->\s*\n+)?(.*?)(?:\n\n|\n##|$)/s);
+ if (missionMatch) {
+ const mission = missionMatch[1].trim().split("\n")[0];
+ lines.push(`Mission: ${mission}`);
+ }
+ const domainMatches = content.matchAll(/###\s+([^\n]+)/g);
+ const domains = [];
+ for (const m of domainMatches) {
+ const name = m[1].trim();
+ if (name !== "Skills" && name !== "Values" && !name.startsWith("####")) {
+ domains.push(name);
+ }
+ }
+ if (domains.length > 0) {
+ lines.push(`Domains: ${domains.join(", ")}`);
+ }
+ const invariantSection = content.match(/(?:Invariants|## Invariants|invariants)([\s\S]*?)(?:\n#|\n---|\n\n\n)/i);
+ if (invariantSection) {
+ const invLines = invariantSection[1].match(/^[-*]\s+`?([^`\n]+)/gm);
+ if (invLines) {
+ lines.push("\nInvariants:");
+ for (const inv of invLines.slice(0, 10)) {
+ lines.push(inv.trim());
+ }
+ }
+ }
+ const prioritySection = content.match(/(?:Decision Priorities|## Decision Priorities)([\s\S]*?)(?:\n#|\n---|\n\n\n)/i);
+ if (prioritySection) {
+ const priLines = prioritySection[1].match(/^[-*]\s+.+>.+/gm);
+ if (priLines) {
+ lines.push("\nPriorities:");
+ for (const pri of priLines.slice(0, 10)) {
+ lines.push(pri.trim());
+ }
+ }
+ }
+ const signalSection = content.match(/(?:## Signals)([\s\S]*?)(?:\n#|\n---|\n\n\n)/i);
+ if (signalSection) {
+ const sigLines = signalSection[1].match(/^[-*]\s+(\w+)/gm);
+ if (sigLines) {
+ lines.push(`
+ Signals: ${sigLines.map((s) => s.replace(/^[-*]\s+/, "")).join(", ")}`);
+ }
+ }
+ const driftSection = content.match(/(?:Drift Behaviors|## Drift Behaviors)([\s\S]*?)(?:\n#|\n---|\n\n\n)/i);
+ if (driftSection) {
+ const driftLines = driftSection[1].match(/^[-*]\s+(.+)/gm);
+ if (driftLines) {
+ lines.push("\nDrift behaviors:");
+ for (const d of driftLines.slice(0, 5)) {
+ lines.push(d.trim());
+ }
+ }
+ }
+ const compressed = lines.join("\n");
+ if (compressed.length < 50) {
+ return content.slice(0, 2e3) + "\n[truncated]";
+ }
+ return compressed;
+ }
+ function compressExocortex(ctx) {
+ const lines = [];
+ if (ctx.attention) {
+ lines.push(`Attention: ${firstMeaningfulLine(ctx.attention)}`);
+ }
+ if (ctx.goals) {
+ lines.push(`Goals: ${firstNLines(ctx.goals, 3)}`);
+ }
+ if (ctx.sprint) {
+ lines.push(`Sprint: ${firstNLines(ctx.sprint, 3)}`);
+ }
+ if (ctx.identity) {
+ lines.push(`Identity: ${firstMeaningfulLine(ctx.identity)}`);
+ }
+ if (ctx.organization) {
+ lines.push(`Org: ${firstMeaningfulLine(ctx.organization)}`);
+ }
+ return lines.join("\n");
+ }
+ function compressLens(lens) {
+ return {
+ evaluationQuestions: lens.primary_frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n"),
+ scoringRubric: lens.primary_frame.scoring_rubric,
+ forbiddenPhrases: lens.forbidden_phrases.join(", "),
+ jargonTranslations: Object.entries(lens.vocabulary.jargon_translations).map(([k, v]) => `${k} \u2192 ${v}`).join("; "),
+ strategicPatterns: lens.strategic_patterns.slice(0, 5).join("\n")
+ };
+ }
+ function compressPriorReads(reads) {
+ if (reads.length === 0) return "";
+ const patternCounts = /* @__PURE__ */ new Map();
+ for (const read of reads) {
+ for (const name of read.patternNames) {
+ patternCounts.set(name, (patternCounts.get(name) ?? 0) + 1);
+ }
+ }
+ const sorted = [...patternCounts.entries()].sort((a, b) => b[1] - a[1]);
+ if (sorted.length === 0) {
+ return `${reads.length} prior reads, no patterns extracted.`;
+ }
+ const patternList = sorted.map(([name, count]) => `${name} (${count}x)`).join(", ");
+ return `${reads.length} prior reads. Patterns seen: ${patternList}. If these recur, note persistence.`;
+ }
+ function firstMeaningfulLine(text) {
+ const lines = text.split("\n").filter((l) => {
+ const t = l.trim();
+ return t.length > 0 && !t.startsWith("#") && !t.startsWith("<!--");
+ });
+ return lines[0]?.slice(0, 200) ?? "";
+ }
+ function firstNLines(text, n) {
+ const lines = text.split("\n").filter((l) => {
+ const t = l.trim();
+ return t.length > 0 && !t.startsWith("#") && !t.startsWith("<!--");
+ });
+ return lines.slice(0, n).map((l) => l.slice(0, 150)).join("; ");
+ }
+
  // src/radiant/core/prompt.ts
  function composeSystemPrompt(worldmodelContent, lens) {
- const sections = [];
- sections.push(
- `## Worldmodel
-
- You are operating inside a governed environment. The worldmodel below
- defines the invariants, signals, decision priorities, and behavioral
- expectations for this organization. Every response you produce must
- be grounded in this worldmodel.
-
- ` + worldmodelContent
- );
- const frame = lens.primary_frame;
- const questionsBlock = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
- const overlapsBlock = frame.overlaps.map(
- (o) => `- ${o.domains[0]} + ${o.domains[1]} = **${o.emergent_state}**: ${o.description}`
- ).join("\n");
- sections.push(
- `## How to Think (Analytical Frame: ${lens.name})
-
- ${frame.scoring_rubric}
-
- ### Evaluation questions to reason through
+ const compressedWorld = compressWorldmodel(worldmodelContent);
+ const cl = compressLens(lens);
+ const overlapsBlock = lens.primary_frame.overlaps.map((o) => `${o.domains[0]} + ${o.domains[1]} = ${o.emergent_state}`).join("\n");
+ return [
+ // Section 1: Compressed worldmodel
+ `## Worldmodel (compressed)

- ${questionsBlock}
+ ${compressedWorld}`,
+ // Section 2: Analytical frame (evaluation questions + rubric)
+ `## How to Think

- ### Overlap emergent states
+ ${cl.scoringRubric}

- ${overlapsBlock}
+ Questions:
+ ${cl.evaluationQuestions}

- ### Center identity
+ Overlaps: ${overlapsBlock}
+ Center: ${lens.primary_frame.center_identity}

- When all dimensions integrate fully: **${frame.center_identity}**. Surface this sparingly \u2014 only when the integration is genuinely complete.`
- );
- const vocabPreferred = Object.entries(lens.vocabulary.preferred).map(([generic, native]) => `- "${generic}" \u2192 **${native}**`).join("\n");
- const vocabArchitecture = lens.vocabulary.architecture.map((t) => `\`${t}\``).join(", ");
- const vocabProperNouns = lens.vocabulary.proper_nouns.map((n) => `**${n}**`).join(", ");
- const strategicBlock = lens.strategic_patterns.map((p) => `- ${p}`).join("\n");
- sections.push(
- `## How to Speak (Voice: ${lens.name})
+ Translate before output: ${cl.jargonTranslations}`,
+ // Section 3: Voice (compressed — register + key rules only)
+ `## Voice: ${lens.name}

  Register: ${lens.voice.register}
-
- Rules:
- - Active voice: ${lens.voice.active_voice}
- - Named specificity (people, places, numbers): ${lens.voice.specificity}
- - Hype vocabulary: ${lens.voice.hype_vocabulary}
- - Hedging / qualified phrasing: ${lens.voice.hedging}
- - Playfulness: ${lens.voice.playfulness}
- - Close with strategic frame: ${lens.voice.close_with_strategic_frame}
- - Honesty about failure: ${lens.voice.honesty_about_failure}
-
- ### Output translation discipline
+ Active voice: ${lens.voice.active_voice}. Specificity: ${lens.voice.specificity}. Hedging: ${lens.voice.hedging}. Hype: ${lens.voice.hype_vocabulary}. Honesty about failure: ${lens.voice.honesty_about_failure}.

  ${lens.voice.output_translation}

- ### Vocabulary
-
- Proper nouns (use literally): ${vocabProperNouns}
-
- Preferred term substitutions:
- ${vocabPreferred}
-
- Architecture vocabulary: ${vocabArchitecture}
-
- ### Strategic decision patterns
-
- When recommending action, these patterns reflect how this organization resolves tradeoffs:
-
- ${strategicBlock}`
- );
- const forbiddenBlock = lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
- sections.push(
+ Strategic patterns:
+ ${cl.strategicPatterns}`,
+ // Section 4: Guardrails (forbidden phrases as comma-separated, not bulleted)
  `## Guardrails

- Do NOT use any of these phrases in your response. If you catch yourself
- reaching for one, rephrase in direct, active, specific language instead.
-
- ${forbiddenBlock}
+ Do NOT use: ${cl.forbiddenPhrases}

- If your response would violate a worldmodel invariant, state the conflict
- explicitly and propose an alternative that honors the invariant.`
- );
- return sections.join("\n\n---\n\n");
+ If a response would violate a worldmodel invariant, state the conflict and propose an alternative.`
+ ].join("\n\n---\n\n");
  }

  // src/radiant/core/voice-check.ts
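
A rough sketch of the compression helpers on their own. The worldmodel document is hypothetical, but its section names (Mission, Invariants, Decision Priorities) mirror what compressWorldmodel looks for:

```ts
import { compressWorldmodel, compressLens, sovereignConduitLens } from "@neuroverseos/governance";

// Hypothetical worldmodel markdown shaped the way compressWorldmodel expects.
const doc = [
  "## Mission",
  "Extend thinking without replacing it.",
  "",
  "## Invariants",
  "- humans own final decisions",
  "- exits stay real",
  "",
  "## Decision Priorities",
  "- safety > speed",
  "",
  "---",
].join("\n");

console.log(compressWorldmodel(doc));
// Mission: Extend thinking without replacing it.
// ...followed by short "Invariants:" and "Priorities:" bullet lists instead of the full document.

// compressLens flattens a lens into prompt-ready strings.
console.log(compressLens(sovereignConduitLens).evaluationQuestions);
```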
@@ -1105,7 +1468,7 @@ function createMockGitHubAdapter(fixedEvents) {
  // src/radiant/adapters/exocortex.ts
  var import_fs = require("fs");
  var import_path = require("path");
- function readExocortex(dirPath) {
+ function readExocortex(dirPath, repoName) {
  const dir = (0, import_path.resolve)(dirPath);
  let filesLoaded = 0;
  function tryRead(...paths) {
@@ -1124,16 +1487,64 @@ function readExocortex(dirPath) {
  }
  return null;
  }
+ const attention = tryRead("attention.md");
+ const goals = tryRead("goals.md");
+ const identity = tryRead("identity.md", "user.md");
+ const organization = tryRead("org/organization.md", "org/src/organization.md");
+ const methods = tryRead("org/methods.md", "org/src/methods.md");
+ let sprint = null;
+ let projectContext = null;
+ if (repoName) {
+ const projectPaths = [
+ repoName,
+ repoName.toLowerCase(),
+ repoName.replace(/-/g, "_")
+ ];
+ for (const projectDir of projectPaths) {
+ const projectSprint = tryRead(
+ `${projectDir}/src/sprint.md`,
+ `${projectDir}/sprint.md`
+ );
+ if (projectSprint) {
+ sprint = projectSprint;
+ break;
+ }
+ }
+ for (const projectDir of projectPaths) {
+ const roadmap = tryRead(
+ `${projectDir}/roadmap.md`,
+ `${projectDir}/src/roadmap.md`
+ );
+ if (roadmap) {
+ projectContext = roadmap;
+ break;
+ }
+ }
+ }
+ if (!sprint) {
+ sprint = tryRead("sprint.md", "src/sprint.md");
+ }
  const ctx = {
- attention: tryRead("attention.md"),
- goals: tryRead("goals.md"),
- identity: tryRead("identity.md"),
- sprint: tryRead("sprint.md", "src/sprint.md"),
- organization: tryRead("org/organization.md", "org/src/organization.md"),
- methods: tryRead("org/methods.md", "org/src/methods.md"),
+ attention,
+ goals,
+ identity,
+ sprint,
+ organization,
+ methods,
  source: dir,
  filesLoaded
  };
+ if (projectContext && ctx.sprint) {
+ ctx.sprint = `${ctx.sprint}
+
+ ---
+ Project roadmap:
+ ${projectContext}`;
+ } else if (projectContext) {
+ ctx.sprint = `Project roadmap:
+ ${projectContext}`;
+ ctx.filesLoaded++;
+ }
  return ctx;
  }
  function formatExocortexForPrompt(ctx) {
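
A small sketch of the exocortex path: the new second argument of readExocortex(dirPath, repoName) points sprint and roadmap lookups at `<repoName>/sprint.md`-style files, and compressExocortex boils the loaded context down to a few lines. Whether readExocortex itself is re-exported from the package entry is not shown in this diff, so the example feeds compressExocortex a hand-built context with invented values:

```ts
import { compressExocortex } from "@neuroverseos/governance";

// Shape mirrors the ctx built by readExocortex(dirPath, repoName) above; values are illustrative.
const ctx = {
  attention: "# Attention\nShip the 0.9 governance read before Friday.",
  goals: "- keep decision ownership with humans\n- make exits real",
  sprint: "- wire world discovery into the CLI",
  identity: null,
  organization: null,
  methods: null,
  source: "/home/example/.exocortex", // hypothetical path
  filesLoaded: 3,
};

console.log(compressExocortex(ctx));
// Attention: Ship the 0.9 governance read before Friday.
// Goals: - keep decision ownership with humans; - make exits real
// Sprint: - wire world discovery into the CLI
```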
@@ -1637,15 +2048,17 @@ function buildInterpretationPrompt(input) {
  const eventSample = formatEventSample(input.events, 30);
  const canonicalList = (input.canonicalPatterns ?? []).length > 0 ? `Patterns the organization has already named (use these names if you see them):
  ${input.canonicalPatterns.map((p) => `- ${p}`).join("\n")}` : "No patterns have been named yet. Everything you observe is new.";
+ const compressedWorld = compressWorldmodel(input.worldmodelContent);
+ const cl = compressLens(input.lens);
  const frame = input.lens.primary_frame;
  const evalQuestions = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
- const forbiddenList = input.lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
- const jargonTable = Object.entries(input.lens.vocabulary.jargon_translations).map(([internal, plain]) => ` "${internal}" \u2192 "${plain}"`).join("\n");
+ const forbiddenList = cl.forbiddenPhrases;
+ const jargonTable = cl.jargonTranslations;
  return `You are a behavioral intelligence system reading team activity and producing a read for the reader who needs to act on it.

- ## Context the reader has loaded
+ ## Worldmodel (compressed)

- ${input.worldmodelContent}
+ ${compressedWorld}

  ## What happened this window

@@ -1836,30 +2249,16 @@ Window: last ${input.windowDays} days \xB7 ${input.eventCount} events
  Lens: ${input.lens.name}`
  );
  if (input.patterns.length > 0) {
- const canonical = input.patterns.filter((p) => p.type === "canonical");
- const candidates = input.patterns.filter((p) => p.type === "candidate");
  let emergentBlock = "EMERGENT\n";
- if (canonical.length > 0) {
- for (const p of canonical) {
- emergentBlock += `
+ for (const p of input.patterns) {
+ emergentBlock += `
  ${p.name}
  `;
- emergentBlock += ` ${p.description}
+ emergentBlock += ` ${p.description}
  `;
- }
- }
- if (candidates.length > 0) {
- emergentBlock += "\n Emergent (candidates \u2014 not yet in worldmodel)\n";
- for (const p of candidates) {
- emergentBlock += `
- ${p.name} (candidate)
- `;
- emergentBlock += ` ${p.description}
+ if (p.evidence.cited_invariant) {
+ emergentBlock += ` Cited invariant: ${p.evidence.cited_invariant}
  `;
- if (p.evidence.cited_invariant) {
- emergentBlock += ` Cited invariant: ${p.evidence.cited_invariant}
- `;
- }
  }
  }
  sections.push(emergentBlock.trimEnd());
@@ -2067,6 +2466,75 @@ function serializeYAML(obj, indent = 0) {
  `;
  }

+ // src/radiant/core/discovery.ts
+ var import_fs2 = require("fs");
+ var import_path2 = require("path");
+ var import_os = require("os");
+ function discoverWorlds(options) {
+ const worlds = [];
+ const userDir = options?.userWorldsDir ?? (0, import_path2.join)((0, import_os.homedir)(), ".neuroverse", "worlds");
+ if ((0, import_fs2.existsSync)(userDir)) {
+ worlds.push(...loadWorldsFromDir(userDir, "user"));
+ }
+ if (options?.explicitWorldsDir) {
+ worlds.push(...loadWorldsFromDir(options.explicitWorldsDir, "repo"));
+ } else if (options?.repoDir) {
+ const repoPaths = [
+ (0, import_path2.join)(options.repoDir, "worlds"),
+ (0, import_path2.join)(options.repoDir, ".neuroverse", "worlds")
+ ];
+ for (const p of repoPaths) {
+ if ((0, import_fs2.existsSync)(p)) {
+ worlds.push(...loadWorldsFromDir(p, "repo"));
+ break;
+ }
+ }
+ }
+ const combinedContent = worlds.map((w) => `<!-- world: ${w.name} (${w.source}) -->
+ ${w.content}`).join("\n\n---\n\n");
+ const summary = worlds.length === 0 ? "no worlds discovered" : worlds.map((w) => `${w.name} (${w.source})`).join(", ");
+ return { worlds, combinedContent, summary };
+ }
+ function formatActiveWorlds(stack) {
+ if (stack.worlds.length === 0) return "No worlds loaded.";
+ const lines = ["ACTIVE WORLDS", ""];
+ for (const w of stack.worlds) {
+ const sourceLabel = w.source === "base" ? "universal" : w.source === "user" ? "personal" : "this repo";
+ lines.push(` ${w.name} (${sourceLabel})`);
+ }
+ return lines.join("\n");
+ }
+ function loadWorldsFromDir(dirPath, source) {
+ const dir = (0, import_path2.resolve)(dirPath);
+ if (!(0, import_fs2.existsSync)(dir)) return [];
+ const stat = (0, import_fs2.statSync)(dir);
+ if (stat.isFile() && dir.endsWith(".md")) {
+ try {
+ return [{
+ name: (0, import_path2.basename)(dir).replace(/\.worldmodel\.md$/, "").replace(/\.nv-world\.md$/, ""),
+ source,
+ path: dir,
+ content: (0, import_fs2.readFileSync)(dir, "utf-8")
+ }];
+ } catch {
+ return [];
+ }
+ }
+ if (!stat.isDirectory()) return [];
+ const files = (0, import_fs2.readdirSync)(dir).filter(
+ (f) => f.endsWith(".worldmodel.md") || f.endsWith(".nv-world.md")
+ ).sort();
+ return files.map((f) => {
+ const fullPath = (0, import_path2.join)(dir, f);
+ return {
+ name: f.replace(/\.worldmodel\.md$/, "").replace(/\.nv-world\.md$/, ""),
+ source,
+ path: fullPath,
+ content: (0, import_fs2.readFileSync)(fullPath, "utf-8")
+ };
+ });
+ }
+
  // src/engine/text-utils.ts
  function normalizeEventText(event) {
  return [
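
A minimal sketch of the discovery path. The directory layout is an assumption, but the lookup order (explicit worlds dir if given, else `<repo>/worlds` then `<repo>/.neuroverse/worlds`, always preceded by `~/.neuroverse/worlds`) matches the code above:

```ts
import { discoverWorlds, formatActiveWorlds } from "@neuroverseos/governance";

// Hypothetical repo checkout containing worlds/acme.worldmodel.md.
const stack = discoverWorlds({ repoDir: "./example-repo" });

console.log(stack.summary);             // e.g. "acme (repo)" or "no worlds discovered"
console.log(formatActiveWorlds(stack)); // "ACTIVE WORLDS" listing, or "No worlds loaded."
// stack.combinedContent is what emergent() falls back to when no worldmodel string is passed in.
```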
@@ -3153,10 +3621,10 @@ function verdictToEvent(status, intent) {
  // src/loader/world-loader.ts
  async function loadWorldFromDirectory(dirPath) {
  const { readFile } = await import("fs/promises");
- const { join: join3 } = await import("path");
- const { readdirSync: readdirSync3 } = await import("fs");
+ const { join: join4 } = await import("path");
+ const { readdirSync: readdirSync4 } = await import("fs");
  async function readJson(filename) {
- const filePath = join3(dirPath, filename);
+ const filePath = join4(dirPath, filename);
  try {
  const content = await readFile(filePath, "utf-8");
  return JSON.parse(content);
@@ -3186,11 +3654,11 @@ async function loadWorldFromDirectory(dirPath) {
  const metadataJson = await readJson("metadata.json");
  const rules = [];
  try {
- const rulesDir = join3(dirPath, "rules");
- const ruleFiles = readdirSync3(rulesDir).filter((f) => f.endsWith(".json")).sort();
+ const rulesDir = join4(dirPath, "rules");
+ const ruleFiles = readdirSync4(rulesDir).filter((f) => f.endsWith(".json")).sort();
  for (const file of ruleFiles) {
  try {
- const content = await readFile(join3(rulesDir, file), "utf-8");
+ const content = await readFile(join4(rulesDir, file), "utf-8");
  rules.push(JSON.parse(content));
  } catch (err) {
  process.stderr.write(
@@ -3350,25 +3818,25 @@ function emptyAudit(total, reason) {
  }

  // src/radiant/memory/palace.ts
- var import_fs2 = require("fs");
- var import_path2 = require("path");
+ var import_fs3 = require("fs");
+ var import_path3 = require("path");
  function writeRead(exocortexDir, frontmatter, text) {
- const dir = (0, import_path2.resolve)(exocortexDir, "radiant", "reads");
- (0, import_fs2.mkdirSync)(dir, { recursive: true });
+ const dir = (0, import_path3.resolve)(exocortexDir, "radiant", "reads");
+ (0, import_fs3.mkdirSync)(dir, { recursive: true });
  const date = (/* @__PURE__ */ new Date()).toISOString().split("T")[0];
  const filename = `${date}.md`;
- const filepath = (0, import_path2.join)(dir, filename);
+ const filepath = (0, import_path3.join)(dir, filename);
  const content = `${frontmatter}

  ${text}
  `;
- (0, import_fs2.writeFileSync)(filepath, content, "utf-8");
+ (0, import_fs3.writeFileSync)(filepath, content, "utf-8");
  return filepath;
  }
  function updateKnowledge(exocortexDir, persistence, options) {
- const dir = (0, import_path2.resolve)(exocortexDir, "radiant");
- (0, import_fs2.mkdirSync)(dir, { recursive: true });
- const filepath = (0, import_path2.join)(dir, "knowledge.md");
+ const dir = (0, import_path3.resolve)(exocortexDir, "radiant");
+ (0, import_fs3.mkdirSync)(dir, { recursive: true });
+ const filepath = (0, import_path3.join)(dir, "knowledge.md");
  const totalReads = options?.totalReads ?? 0;
  const existingUntriggered = loadUntriggeredCounts(filepath);
  const lines = [
@@ -3455,14 +3923,14 @@ function updateKnowledge(exocortexDir, persistence, options) {
  lines.push(`${name}=${count}`);
  }
  lines.push("-->");
- (0, import_fs2.writeFileSync)(filepath, lines.join("\n"), "utf-8");
+ (0, import_fs3.writeFileSync)(filepath, lines.join("\n"), "utf-8");
  return filepath;
  }
  function loadUntriggeredCounts(filepath) {
  const counts = /* @__PURE__ */ new Map();
- if (!(0, import_fs2.existsSync)(filepath)) return counts;
+ if (!(0, import_fs3.existsSync)(filepath)) return counts;
  try {
- const content = (0, import_fs2.readFileSync)(filepath, "utf-8");
+ const content = (0, import_fs3.readFileSync)(filepath, "utf-8");
  const match = content.match(
  /<!-- untriggered_counts[\s\S]*?-->/
  );
@@ -3480,13 +3948,13 @@ function loadUntriggeredCounts(filepath) {
  return counts;
  }
  function loadPriorReads(exocortexDir) {
- const dir = (0, import_path2.resolve)(exocortexDir, "radiant", "reads");
- if (!(0, import_fs2.existsSync)(dir)) return [];
- const files = (0, import_fs2.readdirSync)(dir).filter((f) => f.endsWith(".md")).sort();
+ const dir = (0, import_path3.resolve)(exocortexDir, "radiant", "reads");
+ if (!(0, import_fs3.existsSync)(dir)) return [];
+ const files = (0, import_fs3.readdirSync)(dir).filter((f) => f.endsWith(".md")).sort();
  const reads = [];
  for (const filename of files) {
- const filepath = (0, import_path2.join)(dir, filename);
- const content = (0, import_fs2.readFileSync)(filepath, "utf-8");
+ const filepath = (0, import_path3.join)(dir, filename);
+ const content = (0, import_fs3.readFileSync)(filepath, "utf-8");
  const date = filename.replace(".md", "");
  const fmMatch = content.match(/^---\n([\s\S]*?)\n---/);
  const frontmatter = fmMatch ? fmMatch[1] : "";
@@ -3577,16 +4045,29 @@ function resolveLens(id) {
  }
  async function emergent(input) {
  const lens = resolveLens2(input.lensId);
  const windowDays = input.windowDays ?? 14;
+ let worldStack;
+ let worldmodelContent = input.worldmodelContent;
+ if (!worldmodelContent || worldmodelContent.trim() === "") {
+ worldStack = discoverWorlds({ explicitWorldsDir: input.worldPath });
+ worldmodelContent = worldStack.combinedContent;
+ }
  let statedIntent;
  let exocortexContext;
  let priorReadContext = "";
  if (input.exocortexPath) {
- exocortexContext = readExocortex(input.exocortexPath);
- const formatted = formatExocortexForPrompt(exocortexContext);
- if (formatted) statedIntent = formatted;
+ const repoName = input.scope.type === "repo" ? input.scope.repo : void 0;
+ exocortexContext = readExocortex(input.exocortexPath, repoName);
+ const compressed = compressExocortex(exocortexContext);
+ if (compressed) {
+ statedIntent = `## Stated Intent (from exocortex, compressed)
+
+ ${compressed}
+
+ Compare stated intent against actual GitHub activity. Gaps = drift.`;
+ }
  const priorReads = loadPriorReads(input.exocortexPath);
  if (priorReads.length > 0) {
- priorReadContext = formatPriorReadsForPrompt(priorReads);
+ priorReadContext = compressPriorReads(priorReads);
  }
  }
  let events;
@@ -3604,6 +4085,40 @@ async function emergent(input) {
  windowDays
  });
  }
+ let adapterSignals = "";
+ const activeAdapters = ["github"];
+ const discordToken = process.env.DISCORD_TOKEN;
+ const discordGuild = process.env.DISCORD_GUILD_ID;
+ if (discordToken && discordGuild) {
+ try {
+ const discord = await fetchDiscordActivity(discordGuild, discordToken, { windowDays });
+ events.push(...discord.events);
+ adapterSignals += "\n\n" + formatDiscordSignalsForPrompt(discord.signals);
+ activeAdapters.push("discord");
+ } catch {
+ }
+ }
+ const slackToken = process.env.SLACK_TOKEN;
+ if (slackToken) {
+ try {
+ const slack = await fetchSlackActivity(slackToken, { windowDays });
+ events.push(...slack.events);
+ adapterSignals += "\n\n" + formatSlackSignalsForPrompt(slack.signals);
+ activeAdapters.push("slack");
+ } catch {
+ }
+ }
+ const notionToken = process.env.NOTION_TOKEN;
+ if (notionToken) {
+ try {
+ const notion = await fetchNotionActivity(notionToken, { windowDays });
+ events.push(...notion.events);
+ adapterSignals += "\n\n" + formatNotionSignalsForPrompt(notion.signals);
+ activeAdapters.push("notion");
+ } catch {
+ }
+ }
+ events.sort((a, b) => Date.parse(a.timestamp) - Date.parse(b.timestamp));
  const classified = classifyEvents(events);
  const signals = extractSignals(classified);
  const scores = computeScores(signals, input.worldmodelContent !== "");
@@ -3614,7 +4129,7 @@ async function emergent(input) {
  lens,
  ai: input.ai,
  canonicalPatterns: input.canonicalPatterns,
- statedIntent: statedIntent ? statedIntent + (priorReadContext ? "\n\n" + priorReadContext : "") : priorReadContext || void 0
+ statedIntent: [statedIntent, adapterSignals, priorReadContext].filter(Boolean).join("\n\n") || void 0
  });
  const rewrittenPatterns = patterns.map((p) => lens.rewrite(p));
  const allDescriptions = rewrittenPatterns.map((p) => p.description).join("\n");
@@ -3663,7 +4178,9 @@ async function emergent(input) {
  voiceClean: voiceViolations.length === 0,
  signals,
  scores,
- eventCount: events.length
+ eventCount: events.length,
+ activeAdapters,
+ worldStack
  };
  }
  function computeScores(signals, worldmodelLoaded) {
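
Adapter activation inside emergent() is now driven entirely by environment variables, and the result reports what actually ran via `activeAdapters` (plus `worldStack` when worlds were auto-discovered). A small sketch that mirrors the selection logic above; the helper name is invented and nothing here calls the adapters:

```ts
// Mirrors the env-var checks added to emergent(); GitHub is always active.
function plannedAdapters(env: NodeJS.ProcessEnv = process.env): string[] {
  const adapters = ["github"];
  if (env.DISCORD_TOKEN && env.DISCORD_GUILD_ID) adapters.push("discord"); // both required
  if (env.SLACK_TOKEN) adapters.push("slack");
  if (env.NOTION_TOKEN) adapters.push("notion");
  return adapters;
}

console.log(plannedAdapters()); // compare with result.activeAdapters after a run
```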
@@ -3728,10 +4245,15 @@ var RADIANT_PACKAGE_VERSION = "0.0.0";
  classifyActorDomain,
  classifyEvents,
  composeSystemPrompt,
+ compressExocortex,
+ compressLens,
+ compressPriorReads,
+ compressWorldmodel,
  computePersistence,
  createAnthropicAI,
  createMockAI,
  createMockGitHubAdapter,
+ discoverWorlds,
  emergent,
  extractSignals,
  fetchDiscordActivity,
@@ -3739,6 +4261,7 @@ var RADIANT_PACKAGE_VERSION = "0.0.0";
  fetchGitHubOrgActivity,
  fetchNotionActivity,
  fetchSlackActivity,
+ formatActiveWorlds,
  formatDiscordSignalsForPrompt,
  formatExocortexForPrompt,
  formatNotionSignalsForPrompt,
@@ -3763,6 +4286,7 @@ var RADIANT_PACKAGE_VERSION = "0.0.0";
  scoreCyber,
  scoreLife,
  scoreNeuroVerse,
+ sovereignConduitLens,
  summarizeExocortex,
  think,
  updateKnowledge,