@neuroverseos/governance 0.6.0 → 0.7.0

@@ -0,0 +1,1301 @@
1
+ /**
2
+ * @neuroverseos/governance/radiant — core types
3
+ *
4
+ * Encodes the Universal Math frame from radiant/PROJECT-PLAN.md:
5
+ *
6
+ * - Asymmetric Life/Cyber capability spaces (two distinct native dimension sets,
7
+ * never mirrored).
8
+ * - Presence-based averaging — no weights, no coefficients.
9
+ * - N (coherence) requires a loaded worldmodel; UNAVAILABLE otherwise.
10
+ * - INSUFFICIENT_EVIDENCE and UNAVAILABLE are first-class sentinel states.
11
+ * Silence is never scored as neutral.
12
+ *
13
+ * Conceptually: NeuroverseOS is the universe the two gyroscopes exist in.
14
+ * It defines how they can behave and how they can survive. These types
15
+ * describe what the gyroscopes look like when measured from inside that
16
+ * universe.
17
+ */
18
+ /**
19
+ * Deterministic presence rule. A dimension or bridging component is
20
+ * considered present iff event_count >= k AND confidence >= c.
21
+ *
22
+ * Tunable per worldmodel via frontmatter. Not per-call — tuning belongs to
23
+ * the constitution, not to the invocation.
24
+ */
25
+ interface EvidenceGate {
26
+ /** Minimum event count in the measurement window. */
27
+ k: number;
28
+ /** Minimum signal extraction confidence (0–1). */
29
+ c: number;
30
+ }
31
+ /** Default gate: k=3 events, c=0.5 confidence. Overridable per worldmodel. */
32
+ declare const DEFAULT_EVIDENCE_GATE: EvidenceGate;
33
+ /**
34
+ * Any item that carries a 0–100 score, an event count, and a 0–1 confidence.
35
+ * `presenceAverage` operates on arrays of this shape, regardless of the
36
+ * domain-specific identity each item carries.
37
+ */
38
+ interface ScoredObservation {
39
+ score: number;
40
+ eventCount: number;
41
+ confidence: number;
42
+ }
43
+ /**
44
+ * A single observed life-native capability dimension. Defaults declared by
45
+ * the NeuroVerse base worldmodel: Cognition, Creativity, Sensory. A per-org
46
+ * worldmodel can declare more or fewer.
47
+ */
48
+ interface LifeDimension extends ScoredObservation {
49
+ id: string;
50
+ }
51
+ /**
52
+ * The life gyroscope's measured state across its native dimensions.
53
+ */
54
+ interface LifeCapability {
55
+ dimensions: LifeDimension[];
56
+ }
57
+ /**
58
+ * A single observed cyber-native capability dimension. Defaults declared by
59
+ * the NeuroVerse base worldmodel: AI-reasoning, AR/adaptivity, Spatial.
60
+ *
61
+ * CyberDimension is a distinct type from LifeDimension — the two gyroscopes
62
+ * have independent native vocabularies and must not be mixed at the type
63
+ * level.
64
+ */
65
+ interface CyberDimension extends ScoredObservation {
66
+ id: string;
67
+ }
68
+ /**
69
+ * The cyber gyroscope's measured state across its native dimensions.
70
+ */
71
+ interface CyberCapability {
72
+ dimensions: CyberDimension[];
73
+ }
74
+ /**
75
+ * The four components of NeuroVerse coherence. Each measures a different
76
+ * kind of translation through an open corridor:
77
+ *
78
+ * ALIGN — both sides point at the same mission via shared worldmodel refs
79
+ * HANDOFF — life-side output is legibly picked up by cyber-side (or vice versa)
80
+ * CO_DECISION — both intelligences contributed interpretable input to a decision
81
+ * CO_EXECUTION — both intelligences are present in a shipped artifact
82
+ */
83
+ type BridgingComponent = 'ALIGN' | 'HANDOFF' | 'CO_DECISION' | 'CO_EXECUTION';
84
+ /**
85
+ * An aggregated score for one of the four bridging components, built from
86
+ * individual corridor-opening events where life-side and cyber-side activity
87
+ * referenced the same worldmodel element (invariant / signal / lens / context).
88
+ */
89
+ interface BridgingComponentScore extends ScoredObservation {
90
+ component: BridgingComponent;
91
+ }
92
+ /**
93
+ * Sentinel states a score can take when no number is meaningful.
94
+ *
95
+ * INSUFFICIENT_EVIDENCE — the dimension/entity exists in this universe, but
96
+ * not enough observed evidence passed the presence gate.
97
+ * UNAVAILABLE — the measurement is structurally undefined. Currently only
98
+ * used for N when no worldmodel is loaded (no shared universe for the
99
+ * gyroscopes to register against).
100
+ */
101
+ type ScoreSentinel = 'INSUFFICIENT_EVIDENCE' | 'UNAVAILABLE';
102
+ /**
103
+ * An entity or composite score: a number in [0, 100] when computable, or a
104
+ * sentinel explaining why no number is available.
105
+ */
106
+ type Score = number | ScoreSentinel;
107
+ /** Type guard: this Score is a usable number. */
108
+ declare function isScored(s: Score): s is number;
109
+ /** Type guard: this Score is a sentinel. */
110
+ declare function isSentinel(s: Score): s is ScoreSentinel;
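A small usage sketch (illustrative only) showing how the guards narrow a `Score` before any arithmetic:

```ts
// Sentinels carry an explanation; numbers carry a value. Nothing coerces to zero.
function describeScore(s: Score): string {
  if (isScored(s)) return `scored ${s.toFixed(0)} / 100`; // narrowed to number
  // isSentinel(s) is now necessarily true; the sentinel explains the gap.
  return s === 'UNAVAILABLE'
    ? 'structurally undefined (no worldmodel loaded)'
    : 'not enough evidence passed the presence gate';
}
```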
111
+ /**
112
+ * Radiant's read on whether observed activity aligns with the stated
113
+ * worldmodel. Orthogonal to engine-level ViabilityStatus (which describes
114
+ * whether the worldmodel itself remains structurally coherent). Both surface
115
+ * side-by-side; neither is collapsed into the other.
116
+ *
117
+ * INSUFFICIENT_EVIDENCE is first-class. Silence is never scored as neutral.
118
+ */
119
+ type AlignmentStatus = 'STRONG' | 'STABLE' | 'WATCHING' | 'FRAGILE' | 'MISALIGNED' | 'INSUFFICIENT_EVIDENCE';
120
+ /**
121
+ * An observed behavioral pattern identified by AI pattern interpretation.
122
+ * Two kinds:
123
+ *
124
+ * canonical — the worldmodel declared this pattern by name (e.g.
125
+ * 'coordination_drift' as an evolution-layer pattern). The AI
126
+ * identified it in the activity.
127
+ * candidate — the AI noticed a pattern the worldmodel hasn't
128
+ * declared. Surfaces as "emergent," and accumulates across runs as
129
+ * a potential worldmodel-evolution proposal.
130
+ *
131
+ * Step 5 produces these via AI interpretation + guard-engine governance
132
+ * (evidence gate, invariant check, no-hallucination check). Step 6 (the
133
+ * rendering lens) transforms them by annotating framing + emphasis
134
+ * metadata before the renderer turns them into output text.
135
+ */
136
+ interface ObservedPattern {
137
+ name: string;
138
+ type: 'canonical' | 'candidate';
139
+ /** If canonical, the worldmodel-declared pattern name this matched. */
140
+ declaredAs?: string;
141
+ /** Description of the pattern, in lens voice. */
142
+ description: string;
143
+ /** Evidence the AI cited when naming this pattern. */
144
+ evidence: PatternEvidence;
145
+ /** Rendering-lens annotation: which framing applies. Set by lens.rewrite. */
146
+ framing?: string;
147
+ /** Rendering-lens annotation: what this lens wants weighted. Set by lens.rewrite. */
148
+ emphasis?: string;
149
+ /** Rendering-lens annotation: should the renderer compress? Set by lens.rewrite. */
150
+ compress?: boolean;
151
+ /** AI's confidence in the observation, 0–1. */
152
+ confidence: number;
153
+ }
154
+ interface PatternEvidence {
155
+ /** Signal cell refs, e.g. 'alignment.life', 'follow_through.joint'. */
156
+ signals: string[];
157
+ /** Event IDs grounding the pattern. */
158
+ events: string[];
159
+ /** If the pattern cites a worldmodel invariant (especially for candidates). */
160
+ cited_invariant?: string;
161
+ }
162
+ /**
163
+ * A rendering lens: a deterministic transform applied to patterns + a set
164
+ * of voice / vocabulary / framing rules enforced at the renderer.
165
+ *
166
+ * Three parts:
167
+ * 1. primary_frame — the analytical framework the AI is prompted to
168
+ * reason through when interpreting activity (e.g. the vanguard
169
+ * three-domain scoring).
170
+ * 2. vocabulary + voice — terms to prefer, terms to avoid, forbidden
171
+ * phrasings, preferred phrasings, register directives.
172
+ * 3. rewrite + exemplar refs — the pattern-transform function and
173
+ * pointers to ground-truth reference material for calibration.
174
+ *
175
+ * Guardrails (enforced by the guard engine + the renderer):
176
+ * - cannot invent new signals
177
+ * - cannot override evidence
178
+ * - cannot hallucinate intent
179
+ * - cannot change what is true; only what is emphasized or framed
180
+ */
181
+ interface RenderingLens {
182
+ name: string;
183
+ description: string;
184
+ /** The primary analytical framework — what the AI reasons through first. */
185
+ primary_frame: PrimaryFrame;
186
+ /** Vocabulary map — generic → lens-native term substitutions. */
187
+ vocabulary: LensVocabulary;
188
+ /** Register, tone, specificity, and other voice-level directives. */
189
+ voice: VoiceDirectives;
190
+ /** Phrasings that fail the renderer if present in output. */
191
+ forbidden_phrases: readonly string[];
192
+ /** Phrasings/patterns the lens prefers — calibration for the renderer. */
193
+ preferred_patterns: readonly string[];
194
+ /**
195
+ * Strategic decision patterns the lens encodes. When the AI proposes a
196
+ * move, these patterns inform how it frames the recommendation.
197
+ */
198
+ strategic_patterns: readonly string[];
199
+ /**
200
+ * Pointers to exemplar reference material — worked examples of this
201
+ * lens's primary_frame being implemented in practice. Used for:
202
+ * - few-shot grounding in the AI prompt
203
+ * - voice calibration against ground-truth output
204
+ * - evaluation baselines for testing the lens's effect
205
+ */
206
+ exemplar_refs: readonly ExemplarRef[];
207
+ /**
208
+ * Pattern-transform function. Takes an ObservedPattern and annotates
209
+ * it with framing / emphasis / compress metadata. Pure function; no
210
+ * side effects; must not change `evidence` or `name`.
211
+ */
212
+ rewrite: (pattern: ObservedPattern) => ObservedPattern;
213
+ }
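A minimal sketch of a guardrail-respecting `rewrite` (the framing and emphasis strings are invented for illustration; this is not the shipped auki-builder transform):

```ts
// Annotates framing, emphasis, and compression without touching `evidence`
// or `name`, per the guardrails above. Pure: returns a new object, mutates nothing.
const exampleRewrite = (pattern: ObservedPattern): ObservedPattern => ({
  ...pattern,
  framing: pattern.type === 'candidate' ? 'emergent-observation' : 'declared-pattern',
  emphasis: pattern.confidence >= 0.8 ? 'lead-with-this' : 'mention-in-passing',
  compress: pattern.confidence < 0.5,
});
```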
214
+ /**
215
+ * The primary analytical framework declared by a lens. For the vanguard
216
+ * lens this is the three-domain scoring (Future Foresight, Narrative
217
+ * Dynamics, Shared Prosperity) with its overlap emergent states.
218
+ *
219
+ * The AI is prompted to reason through this framework first when
220
+ * interpreting activity. It answers the evaluation questions and names
221
+ * which domains are present, which are weak, and which overlaps light up.
222
+ */
223
+ interface PrimaryFrame {
224
+ /** Domain identifiers (kebab-case), e.g. ['future-foresight', 'narrative-dynamics', 'shared-prosperity']. */
225
+ domains: readonly string[];
226
+ /** Named emergent states produced when specific pairs of domains overlap. */
227
+ overlaps: readonly OverlapDef[];
228
+ /** The center identity — what the system becomes when all domains integrate. */
229
+ center_identity: string;
230
+ /** Questions the AI is prompted to answer when applying this frame. */
231
+ evaluation_questions: readonly string[];
232
+ /** Description of how to apply the frame to activity. */
233
+ scoring_rubric: string;
234
+ }
235
+ interface OverlapDef {
236
+ /** The two domain identifiers this overlap joins. Order-insensitive. */
237
+ domains: readonly [string, string];
238
+ /** The emergent state the overlap produces (e.g. 'Inspiration'). */
239
+ emergent_state: string;
240
+ /** Short description of what this overlap looks like in practice. */
241
+ description: string;
242
+ }
243
+ interface LensVocabulary {
244
+ /** Proper nouns the lens expects to appear verbatim (not paraphrased). */
245
+ proper_nouns: readonly string[];
246
+ /**
247
+ * Generic term → lens-native replacement. The renderer substitutes
248
+ * generic phrasings with lens-native ones. E.g. `'device': 'participant'`.
249
+ */
250
+ preferred: Record<string, string>;
251
+ /** Architectural vocabulary specific to this lens's domain. */
252
+ architecture: readonly string[];
253
+ /** Economic / resource vocabulary specific to this lens's domain. */
254
+ economic: readonly string[];
255
+ /** Framing and mission-level vocabulary for this lens. */
256
+ framing: readonly string[];
257
+ /**
258
+ * System-internal concepts → plain English. Applied to OUTPUT only —
259
+ * before the AI surfaces a description, it must translate any of these
260
+ * terms into their right-column equivalent. Readers don't know Radiant's
261
+ * internal vocabulary; speaking it to them creates false precision.
262
+ *
263
+ * E.g. 'worldmodel' → 'your strategy file', 'candidate pattern' →
264
+ * 'something noticed but not yet tracked by name'.
265
+ */
266
+ jargon_translations: Record<string, string>;
267
+ }
268
+ interface VoiceDirectives {
269
+ /** Register in which the lens speaks (e.g. 'diagnosis mode'). */
270
+ register: string;
271
+ /** Active-voice requirement. */
272
+ active_voice: 'required' | 'preferred' | 'flexible';
273
+ /** Specificity requirement — names, numbers, places. */
274
+ specificity: 'required' | 'preferred' | 'flexible';
275
+ /** How the lens treats hype vocabulary. */
276
+ hype_vocabulary: 'forbidden' | 'discouraged' | 'allowed';
277
+ /** How the lens treats hedged / qualified phrasing. */
278
+ hedging: 'forbidden' | 'discouraged' | 'allowed';
279
+ /** Whether playfulness is allowed in this register. */
280
+ playfulness: 'allowed' | 'rare' | 'forbidden';
281
+ /** Whether output should close with a strategic-frame sentence. */
282
+ close_with_strategic_frame: 'preferred' | 'required' | 'optional';
283
+ /** Whether the "punchline move" (rhythm-break emphasis) is sanctioned. */
284
+ punchline_move: 'sparing' | 'frequent' | 'avoided';
285
+ /** Whether the lens requires honesty about what isn't working. */
286
+ honesty_about_failure: 'required' | 'preferred' | 'optional';
287
+ /**
288
+ * Reason-internally / express-externally directive.
289
+ *
290
+ * Some lens frames (like the vanguard three-domain scoring) are
291
+ * model-maker scaffolds — useful for AI reasoning, confusing as
292
+ * reader-facing labels. This directive tells the AI to reason
293
+ * through bucket-level concepts but express findings in the
294
+ * skill-level vocabulary inside each bucket. Bucket names live in
295
+ * `forbidden_phrases` so the renderer enforces the rule at build
296
+ * time; this directive gives the AI the "why" to cooperate
297
+ * upstream of the enforcement.
298
+ */
299
+ output_translation: string;
300
+ }
301
+ /**
302
+ * Pointer to a reference exemplar — a worked example of this lens's
303
+ * primary_frame being implemented. These live under
304
+ * `src/radiant/examples/<org>/exemplars/`.
305
+ */
306
+ interface ExemplarRef {
307
+ /** Path relative to the exemplars directory. */
308
+ path: string;
309
+ /** Short title for this exemplar. */
310
+ title: string;
311
+ /** Which domains from the primary_frame this exemplar exhibits. */
312
+ exhibits: readonly string[];
313
+ /**
314
+ * Integration quality: 'full' (all domains present + integrated),
315
+ * 'partial' (some domains strong, others absent), 'primary-dominant'
316
+ * (one domain dominates), etc. Free-form.
317
+ */
318
+ integration_quality: string;
319
+ /** Notes about what this exemplar teaches for lens calibration. */
320
+ notes: string;
321
+ }
322
+
323
+ /**
324
+ * @neuroverseos/governance/radiant — L / C / N / R scoring math
325
+ *
326
+ * Presence-based averaging over asymmetric Life and Cyber capability spaces.
327
+ * No weights. No coefficients. Silence is never scored as neutral.
328
+ *
329
+ * Implements the Universal Math section of radiant/PROJECT-PLAN.md.
330
+ * The canonical formulas live there; this file is their runtime.
331
+ */
332
+
333
+ /**
334
+ * A dimension or bridging component is present iff
335
+ * event_count >= k AND confidence >= c.
336
+ *
337
+ * Absent items are excluded from averages — never zero-scored.
338
+ */
339
+ declare function isPresent(o: Pick<ScoredObservation, 'eventCount' | 'confidence'>, gate?: EvidenceGate): boolean;
340
+ /**
341
+ * Average the scores of items that pass the presence gate. Returns
342
+ * INSUFFICIENT_EVIDENCE if none pass.
343
+ *
344
+ * This is the single averaging primitive — all entity scores (L, C, N) and
345
+ * the composite R are built from it.
346
+ */
347
+ declare function presenceAverage(items: ReadonlyArray<ScoredObservation>, gate?: EvidenceGate): Score;
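A sketch of how the gate and the averaging primitive plausibly compose, using the documented default gate (k=3, c=0.5); the shipped implementation may differ in detail:

```ts
function isPresentSketch(
  o: Pick<ScoredObservation, 'eventCount' | 'confidence'>,
  gate: EvidenceGate = { k: 3, c: 0.5 },
): boolean {
  return o.eventCount >= gate.k && o.confidence >= gate.c;
}

function presenceAverageSketch(items: ReadonlyArray<ScoredObservation>, gate?: EvidenceGate): Score {
  const present = items.filter((i) => isPresentSketch(i, gate));
  if (present.length === 0) return 'INSUFFICIENT_EVIDENCE'; // absent items are excluded, never zero-scored
  return present.reduce((sum, i) => sum + i.score, 0) / present.length;
}
```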
348
+ /**
349
+ * Life gyroscope score. Averages over present life-native dimensions.
350
+ *
351
+ * Pure cognition team (only COG present) → L = score(COG) — full 0–100.
352
+ * All three active → L = avg(COG, CRE, SEN).
353
+ * No dimension present → L = INSUFFICIENT_EVIDENCE.
354
+ */
355
+ declare function scoreLife(capability: LifeCapability, gate?: EvidenceGate): Score;
356
+ /**
357
+ * Cyber gyroscope score. Averages over present cyber-native dimensions.
358
+ *
359
+ * Same shape as scoreLife, but operates on a distinct (asymmetric) dimension
360
+ * set. A human exercising COG and an AI exercising AR are both fully
361
+ * engaged — neither is "off," and this math does not force symmetry.
362
+ */
363
+ declare function scoreCyber(capability: CyberCapability, gate?: EvidenceGate): Score;
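A usage sketch of the two scorers; the dimension ids and numbers are illustrative:

```ts
// Pure-cognition team: only COG passes the default gate, so L = score(COG) = 82.
const L = scoreLife({
  dimensions: [
    { id: 'COG', score: 82, eventCount: 12, confidence: 0.9 },
    { id: 'CRE', score: 55, eventCount: 1, confidence: 0.2 }, // gated out, not zero-scored
  ],
});

// The cyber gyroscope is scored over its own, asymmetric dimension set.
const C = scoreCyber({
  dimensions: [{ id: 'AR', score: 74, eventCount: 8, confidence: 0.8 }],
}); // C = 74
```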
364
+ /**
365
+ * NeuroVerse coherence. Translation quality through open corridors.
366
+ *
367
+ * No worldmodel loaded → UNAVAILABLE. There is no shared universe for the
368
+ * gyroscopes to register against; coherence is undefined, not zero.
369
+ * Worldmodel loaded, no corridor-opening evidence passes the gate →
370
+ * INSUFFICIENT_EVIDENCE.
371
+ * Otherwise → presence-average over the four bridging components.
372
+ *
373
+ * High L and high C can still produce low N: two excellent intelligences
374
+ * working past each other without opening a corridor score low. That is
375
+ * the correct behavior — N is not synchronization.
376
+ */
377
+ declare function scoreNeuroVerse(components: ReadonlyArray<BridgingComponentScore>, worldmodelLoaded: boolean, gate?: EvidenceGate): Score;
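The three documented outcomes, sketched with illustrative values:

```ts
const components: BridgingComponentScore[] = [
  { component: 'ALIGN', score: 65, eventCount: 5, confidence: 0.7 },
  { component: 'HANDOFF', score: 40, eventCount: 1, confidence: 0.3 }, // fails the gate
];

scoreNeuroVerse(components, false); // 'UNAVAILABLE': no worldmodel, N is undefined
scoreNeuroVerse([], true);          // 'INSUFFICIENT_EVIDENCE': universe exists, no open corridors
scoreNeuroVerse(components, true);  // 65: presence-average over the one present component
```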
378
+ /**
379
+ * Composite alignment. Averages over whichever entity alignments are
380
+ * available, excluding any in INSUFFICIENT_EVIDENCE or UNAVAILABLE.
381
+ *
382
+ * All-human deployment (A_C unavailable) → R = A_L.
383
+ * All-AI pipeline (A_L unavailable) → R = A_C.
384
+ * Hybrid with worldmodel loaded → R = avg(A_L, A_C, A_N).
385
+ * Nothing available → R = INSUFFICIENT_EVIDENCE.
386
+ *
387
+ * Sentinels are excluded, not zeroed. A missing entity does not drag R down.
388
+ */
389
+ declare function scoreComposite(a_L: Score, a_C: Score, a_N: Score): Score;
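A sketch of the exclusion rule, consistent with the cases above (not the shipped code):

```ts
function scoreCompositeSketch(a_L: Score, a_C: Score, a_N: Score): Score {
  const usable = [a_L, a_C, a_N].filter(isScored); // sentinels excluded, not zeroed
  if (usable.length === 0) return 'INSUFFICIENT_EVIDENCE';
  return usable.reduce((sum, s) => sum + s, 0) / usable.length;
}

scoreCompositeSketch(80, 'UNAVAILABLE', 'UNAVAILABLE'); // 80: all-human deployment, R = A_L
```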
390
+
391
+ /**
392
+ * @neuroverseos/governance/radiant — actor_domain classification
393
+ *
394
+ * Every event in Radiant's pipeline is tagged `life`, `cyber`, or `joint`
395
+ * before signals are extracted. This is the fixed, universal classifier —
396
+ * declared as part of the NeuroverseOS universe, not a per-worldmodel choice.
397
+ *
398
+ * life — human actions alone (commits authored by people, human reviews,
399
+ * human-written decisions)
400
+ * cyber — AI or bot actions alone (AI-generated code, automated comments,
401
+ * bot commits)
402
+ * joint — activity where human and AI both participated: a human accepting
403
+ * or rejecting AI output, iterative co-edits, co-authored commits,
404
+ * escalation loops between life-side and cyber-side
405
+ *
406
+ * The classifier is deterministic and pure. It looks only at the event's
407
+ * actor metadata and its relationships (co-actors, respondsTo). Adapters
408
+ * are responsible for populating those fields accurately from their source
409
+ * of truth (GitHub, ExoCortex, chat, etc.).
410
+ *
411
+ * See radiant/PROJECT-PLAN.md — "actor_domain Classification".
412
+ */
413
+ /**
414
+ * What kind of actor produced an event.
415
+ *
416
+ * human — a person
417
+ * ai — an AI agent (Claude, Copilot-generated code, agent output)
418
+ * bot — a non-AI automated actor (dependabot, CI bots, webhook bots)
419
+ * unknown — actor kind cannot be determined from available evidence
420
+ *
421
+ * `bot` is grouped with `ai` on the cyber side of the boundary — from the
422
+ * gyroscope's perspective, both are non-human actors operating in the
423
+ * universe. The distinction may matter to specific signals but not to the
424
+ * domain classifier.
425
+ */
426
+ type ActorKind = 'human' | 'ai' | 'bot' | 'unknown';
427
+ /** An entity that produced or participated in an event. */
428
+ interface Actor {
429
+ id: string;
430
+ kind: ActorKind;
431
+ name?: string;
432
+ }
433
+ /** A reference to a prior event this one responds to. */
434
+ interface EventReference {
435
+ eventId: string;
436
+ actor: Actor;
437
+ }
438
+ /**
439
+ * A minimal event shape sufficient for domain classification and downstream
440
+ * signal extraction. Adapters (GitHub, ExoCortex, chat) populate these
441
+ * fields from their respective source of truth.
442
+ *
443
+ * The classifier in this file only reads `actor`, `coActors`, and
444
+ * `respondsTo`. Signal extractors (step 4) additionally read `kind` and
445
+ * `content`. Later steps may extend this interface; keep additions
446
+ * optional so existing adapters remain compatible.
447
+ */
448
+ interface Event {
449
+ id: string;
450
+ timestamp: string;
451
+ actor: Actor;
452
+ /** Additional participants (e.g. Co-authored-by trailers on a commit). */
453
+ coActors?: Actor[];
454
+ /** If this event is a reply, review, merge, or edit of a prior event. */
455
+ respondsTo?: EventReference;
456
+ /**
457
+ * Loose event-kind tag from the adapter — e.g. `commit`, `pr_opened`,
458
+ * `pr_review`, `issue_comment`, `chat_message`. Free-form string so any
459
+ * adapter can declare its own kinds; signal extractors interpret them.
460
+ */
461
+ kind?: string;
462
+ /**
463
+ * Human-meaningful textual content of the event — commit message, PR
464
+ * description, review body, chat message body, etc. Used by signal
465
+ * extractors to score clarity and related signals.
466
+ */
467
+ content?: string;
468
+ /** Adapter-specific structured data, opaque to core Radiant. */
469
+ metadata?: Record<string, unknown>;
470
+ }
471
+ /**
472
+ * The three-way domain tag applied to every event. Drives which gyroscope's
473
+ * capability space the event contributes to (life or cyber), or — for joint
474
+ * events — feeds the bridging-component scoring that powers N.
475
+ */
476
+ type ActorDomain = 'life' | 'cyber' | 'joint';
477
+ /**
478
+ * Tag an event as `life`, `cyber`, or `joint`.
479
+ *
480
+ * Rules, in order:
481
+ *
482
+ * 1. Mixed authorship → joint. If the event has co-actors and the
483
+ * combined set spans both life-side and cyber-side, the event is
484
+ * joint regardless of who's primary. A commit with a human author
485
+ * and an AI co-author is joint.
486
+ *
487
+ * 2. Cross-boundary response → joint. If the event is a response to a
488
+ * prior event whose actor is on the opposite side of the boundary,
489
+ * the event is joint. A human merging an AI-authored PR is joint.
490
+ * An AI commenting on a human-authored issue is joint.
491
+ *
492
+ * 3. Otherwise, classify by the primary actor's kind.
493
+ * - ai | bot → cyber
494
+ * - human | unknown → life
495
+ *
496
+ * Rule 3's `unknown → life` default is deliberate: most events in most
497
+ * systems come from humans. When an adapter cannot determine actor kind,
498
+ * the event is assumed human until proven otherwise. Upgrading requires
499
+ * positive evidence (a bot user-agent, an AI signature, a "Co-authored-by:
500
+ * Claude" trailer, etc.), never absence.
501
+ */
502
+ declare function classifyActorDomain(event: Event): ActorDomain;
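A sketch of the three rules composed in order; `isCyberKind` is a hypothetical helper, not a package export:

```ts
// Hypothetical helper: which side of the boundary an actor kind falls on.
const isCyberKind = (k: ActorKind): boolean => k === 'ai' || k === 'bot';

function classifyActorDomainSketch(event: Event): ActorDomain {
  const sides = [event.actor, ...(event.coActors ?? [])].map((a) => isCyberKind(a.kind));
  // Rule 1: mixed authorship spans the boundary.
  if (sides.includes(true) && sides.includes(false)) return 'joint';
  // Rule 2: response to an event from the opposite side.
  if (event.respondsTo &&
      isCyberKind(event.respondsTo.actor.kind) !== isCyberKind(event.actor.kind)) {
    return 'joint';
  }
  // Rule 3: primary actor's kind; unknown defaults to life.
  return isCyberKind(event.actor.kind) ? 'cyber' : 'life';
}
```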
503
+
504
+ /**
505
+ * @neuroverseos/governance/radiant — Auki Builder Lens
506
+ *
507
+ * A rendering lens that prompts the AI to reason through Auki's vanguard
508
+ * leadership model when interpreting activity, and enforces voice /
509
+ * vocabulary / framing rules consistent with how Auki builders
510
+ * communicate when that model is in effect.
511
+ *
512
+ * The lens does NOT capture any specific person's speaking patterns.
513
+ * It captures the vanguard leadership operating system — codified as the
514
+ * three-domain analytical frame (Future Foresight, Narrative Dynamics,
515
+ * Shared Prosperity) together with the voice and vocabulary an Auki
516
+ * builder uses when that operating system is running.
517
+ *
518
+ * The vanguard worldmodel (src/worlds/auki-vanguard.worldmodel.md) is the
519
+ * abstract DNA. This lens is how that DNA speaks and thinks.
520
+ *
521
+ * Exemplars — worked instances of the vanguard model being implemented —
522
+ * live at src/radiant/examples/auki/exemplars/. The lens references them
523
+ * for calibration: when applying this lens, the AI's output should fall
524
+ * in the same neighborhood as those exemplars, not match them verbatim.
525
+ *
526
+ * Guardrails (enforced downstream):
527
+ * - cannot invent new signals
528
+ * - cannot override evidence
529
+ * - cannot hallucinate intent
530
+ * - cannot change what is true; only what is emphasized and framed
531
+ *
532
+ * See radiant/PROJECT-PLAN.md §"Three-Layer Interpretation Architecture".
533
+ */
534
+
535
+ /**
536
+ * The Auki Builder Lens. A first-class npm export under
537
+ * `@neuroverseos/governance/radiant/lenses`, consumable by the
538
+ * `neuroverse radiant think` / `emergent` / `decision` commands.
539
+ *
540
+ * The lens is Auki-specific *content* in the form of a generic
541
+ * RenderingLens primitive. The extraction to generic OSS Radiant
542
+ * replaces this file's content with a template scaffold; the surrounding
543
+ * code (renderer, MCP server, lens system) stays unchanged.
544
+ */
545
+ declare const aukiBuilderLens: RenderingLens;
546
+
547
+ /**
548
+ * @neuroverseos/governance/radiant/lenses
549
+ *
550
+ * Rendering lenses — the third of the three interpretation layers
551
+ * described in radiant/PROJECT-PLAN.md. Each lens is a deterministic
552
+ * pattern-transform plus a set of voice / vocabulary / framing rules.
553
+ *
554
+ * Lenses are first-class npm exports. Registering a new lens means
555
+ * exporting its definition from this file. The `neuroverse radiant`
556
+ * commands accept a `--lens <id>` flag and resolve it against the
557
+ * registered set.
558
+ *
559
+ * Current registry:
560
+ * - auki-builder — Auki's vanguard leadership lens. Reason internally
561
+ * through Future Foresight / Narrative Dynamics / Shared Prosperity;
562
+ * express externally with the skill-level vocabulary inside each
563
+ * domain. See ./auki-builder.ts for the content.
564
+ *
565
+ * To add a new lens:
566
+ * 1. Create src/radiant/lenses/<name>.ts exporting a `RenderingLens`.
567
+ * 2. Re-export it from this file.
568
+ * 3. Add it to `LENSES` below.
569
+ *   4. If it's an Auki-specific lens, keep it alongside auki-builder here.
570
+ * If it's a generic OSS lens, start a fresh family.
571
+ */
572
+
573
+ /**
574
+ * Registered lenses keyed by name. Consumers (CLI, MCP server, tests)
575
+ * resolve a lens by id through this map.
576
+ */
577
+ declare const LENSES: Readonly<Record<string, RenderingLens>>;
578
+ /**
579
+ * Resolve a lens by id. Returns undefined if not registered — callers
580
+ * should surface a clear error rather than silently falling back to a
581
+ * default lens (which would violate the voice/framing enforcement).
582
+ */
583
+ declare function getLens(id: string): RenderingLens | undefined;
584
+ /**
585
+ * List all registered lens ids. Used by `neuroverse radiant lenses list`.
586
+ */
587
+ declare function listLenses(): readonly string[];
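A usage sketch of the resolve-or-fail contract:

```ts
const lensId = 'auki-builder';
const lens = getLens(lensId);
if (!lens) {
  // Surface a clear error; never silently fall back to a default lens.
  throw new Error(`Unknown lens "${lensId}". Registered: ${listLenses().join(', ')}`);
}
```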
588
+
589
+ /**
590
+ * @neuroverseos/governance/radiant — signal extraction
591
+ *
592
+ * Step 4: turn a list of classified events into the 5 × 3 signal matrix
593
+ * that downstream steps (pattern composition, rendering lens, renderer)
594
+ * read to produce alignment output.
595
+ *
596
+ * The matrix is: { signal_id × actor_domain } → ScoredObservation,
597
+ * i.e. 5 signals × 3 domains (life / cyber / joint) = 15 cells by default.
598
+ * Scores are 0–100 and each cell reports its own eventCount and confidence
599
+ * so the evidence gate (from step 2) can decide presence downstream.
600
+ *
601
+ * Defaults declared by the NeuroVerse base worldmodel:
602
+ * clarity · ownership · follow_through · alignment · decision_momentum
603
+ *
604
+ * A worldmodel can declare additional signals by providing extra extractors.
605
+ * The extractor interface is deterministic and stateless — no LLM calls, no
606
+ * heuristics that depend on run order.
607
+ *
608
+ * Step-4 scope note: the default extractors below are intentionally simple
609
+ * heuristics. They produce defensible numbers on synthetic and real events,
610
+ * but Phase-4 validation with Auki's repos is where the scoring gets tuned.
611
+ * Extractors are pluggable precisely so they can be replaced without
612
+ * touching downstream math.
613
+ */
614
+
615
+ /**
616
+ * An Event that has been tagged with its ActorDomain. Produced by
617
+ * `classifyEvents` and consumed by extractors. Pre-classifying once avoids
618
+ * re-running the classifier for every (signal × domain) cell.
619
+ */
620
+ interface ClassifiedEvent {
621
+ event: Event;
622
+ domain: ActorDomain;
623
+ }
624
+ /**
625
+ * One cell of the signal matrix: a score for a given signal in a given
626
+ * actor_domain. Downstream math reads `score`, `eventCount`, `confidence`
627
+ * and passes each cell through the evidence gate.
628
+ */
629
+ interface Signal {
630
+ id: string;
631
+ domain: ActorDomain;
632
+ score: number;
633
+ eventCount: number;
634
+ confidence: number;
635
+ }
636
+ /** The computed 5 × 3 matrix (or whichever dimensions the extractors define). */
637
+ type SignalMatrix = readonly Signal[];
638
+ /**
639
+ * The three-tuple a SignalExtractor returns for a given (signal, domain).
640
+ */
641
+ interface ExtractionResult {
642
+ score: number;
643
+ eventCount: number;
644
+ confidence: number;
645
+ }
646
+ /**
647
+ * A named extraction routine. Given all classified events and a target
648
+ * domain, produce a score for this signal in that domain.
649
+ *
650
+ * Extractors must be:
651
+ * - deterministic (same input → same output)
652
+ * - stateless (no hidden mutable state between calls)
653
+ * - side-effect free (no IO, no LLM calls, no wall-clock reads)
654
+ *
655
+ * An extractor that can't score its signal in a given domain should still
656
+ * return a well-formed ExtractionResult — typically `{ score: 0,
657
+ * eventCount: 0, confidence: 0 }` — which the evidence gate will filter
658
+ * out as not-present.
659
+ */
660
+ interface SignalExtractor {
661
+ id: string;
662
+ description: string;
663
+ extract(events: readonly ClassifiedEvent[], domain: ActorDomain): ExtractionResult;
664
+ }
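A minimal custom extractor honoring the determinism rules above; the length-based heuristic is invented purely for illustration:

```ts
// Scores 'clarity' in one domain from average content length, capped at 100.
// Deterministic, stateless, no IO: exactly the extractor contract.
const clarityByLength: SignalExtractor = {
  id: 'clarity',
  description: 'Average event-content length as a crude clarity proxy (illustrative).',
  extract(events, domain) {
    const relevant = events.filter((e) => e.domain === domain && e.event.content);
    if (relevant.length === 0) return { score: 0, eventCount: 0, confidence: 0 };
    const avgLen =
      relevant.reduce((sum, e) => sum + (e.event.content?.length ?? 0), 0) / relevant.length;
    return { score: Math.min(100, avgLen / 5), eventCount: relevant.length, confidence: 0.6 };
  },
};

// Registered alongside the defaults:
// extractSignals(classified, [...DEFAULT_SIGNAL_EXTRACTORS, clarityByLength]);
```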
665
+ /**
666
+ * Tag every event with its domain, once. Downstream extractors read the
667
+ * tagged stream without reclassifying.
668
+ */
669
+ declare function classifyEvents(events: readonly Event[]): ClassifiedEvent[];
670
+ /**
671
+ * Compute the signal matrix. For each extractor × each domain, run the
672
+ * extractor and collect the resulting Signal.
673
+ *
674
+ * The output is a flat list of Signal cells. Consumers that need the matrix
675
+ * by name or by domain can group as needed; keeping the wire format flat
676
+ * avoids encoding the domain enumeration in the shape.
677
+ */
678
+ declare function extractSignals(events: readonly ClassifiedEvent[], extractors?: readonly SignalExtractor[]): SignalMatrix;
679
+ /**
680
+ * The five default extractors declared by the NeuroVerse base worldmodel.
681
+ * Each is a simple, defensible heuristic for Phase 1 — all are pluggable
682
+ * and intended to be tuned against real deployment data in Phase 4.
683
+ */
684
+ declare const DEFAULT_SIGNAL_EXTRACTORS: readonly SignalExtractor[];
685
+
686
+ /**
687
+ * @neuroverseos/governance/radiant — system prompt composer
688
+ *
689
+ * Reads worldmodel content (markdown) + a RenderingLens and composes the
690
+ * system prompt that governs the AI's reasoning and output.
691
+ *
692
+ * Pure function. No AI calls. No IO. Deterministic: same inputs, same
693
+ * string output. Testable without an API key.
694
+ *
695
+ * The composed prompt has four sections:
696
+ * 1. Worldmodel context — the compiled worldmodel(s) as readable markdown
697
+ * 2. Analytical frame — the lens's primary_frame (how to reason)
698
+ * 3. Voice + vocabulary — the lens's voice directives + vocabulary map
699
+ * 4. Guardrails — forbidden phrases + output translation discipline
700
+ *
701
+ * The AI receives this as a system message. The user's query is a separate
702
+ * user message. The separation means the system prompt can be cached across
703
+ * queries (if the worldmodel + lens haven't changed).
704
+ */
705
+
706
+ /**
707
+ * Compose the system prompt from worldmodel content + rendering lens.
708
+ *
709
+ * @param worldmodelContent — raw markdown of the worldmodel source file(s).
710
+ * If multiple worldmodels are loaded, concatenate them with a separator.
711
+ * @param lens — the active rendering lens.
712
+ * @returns a system prompt string ready for the AI's system message.
713
+ */
714
+ declare function composeSystemPrompt(worldmodelContent: string, lens: RenderingLens): string;
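A usage sketch; the worldmodel path is illustrative:

```ts
import { readFileSync } from 'node:fs';

const lens = getLens('auki-builder');
if (!lens) throw new Error('lens not registered');
const worldmodel = readFileSync('./auki.worldmodel.md', 'utf-8'); // path is illustrative
const systemPrompt = composeSystemPrompt(worldmodel, lens);
// Deterministic: the same worldmodel + lens yields the same string, so the
// composed prompt can be cached across queries.
```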
715
+
716
+ /**
717
+ * @neuroverseos/governance/radiant — voice check
718
+ *
719
+ * Post-processes AI output to detect forbidden phrases from the active
720
+ * rendering lens. Returns violations. The caller (CLI, MCP server,
721
+ * renderer) decides what to do — fail, warn, or retry.
722
+ *
723
+ * Pure function. No AI calls. Deterministic.
724
+ */
725
+
726
+ /**
727
+ * A detected forbidden-phrase violation in the output text.
728
+ */
729
+ interface VoiceViolation {
730
+ /** The forbidden phrase that was matched (case-insensitive). */
731
+ phrase: string;
732
+ /** Character offset where the phrase was found. */
733
+ offset: number;
734
+ }
735
+ /**
736
+ * Check text for forbidden phrases from the active lens.
737
+ *
738
+ * Match is case-insensitive substring. Returns all violations found,
739
+ * in order of appearance. Empty array = clean output.
740
+ */
741
+ declare function checkForbiddenPhrases(lens: RenderingLens, text: string): VoiceViolation[];
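A sketch of the documented matching behavior (case-insensitive substring, all matches, in order of appearance); not the shipped implementation:

```ts
function checkForbiddenPhrasesSketch(lens: RenderingLens, text: string): VoiceViolation[] {
  const haystack = text.toLowerCase();
  const violations: VoiceViolation[] = [];
  for (const phrase of lens.forbidden_phrases) {
    const needle = phrase.toLowerCase();
    for (let at = haystack.indexOf(needle); at !== -1; at = haystack.indexOf(needle, at + needle.length)) {
      violations.push({ phrase, offset: at });
    }
  }
  return violations.sort((a, b) => a.offset - b.offset); // order of appearance
}
```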
742
+
743
+ /**
744
+ * @neuroverseos/governance/radiant — AI adapter
745
+ *
746
+ * Abstract interface for calling an LLM, plus a concrete Anthropic
747
+ * implementation using raw fetch (no SDK dependency).
748
+ *
749
+ * The interface is intentionally thin — one function, string in, string
750
+ * out. The system prompt is composed upstream by `composeSystemPrompt`;
751
+ * the AI adapter just sends it and the user query, returns the response.
752
+ *
753
+ * Phase 1 targets Anthropic Claude. The interface is generic enough to
754
+ * add OpenAI, LangChain, or local-model implementations later.
755
+ */
756
+ /**
757
+ * A minimal AI completion interface. Implementors call an LLM with a
758
+ * system prompt and a user query and return the response text.
759
+ */
760
+ interface RadiantAI {
761
+ complete(systemPrompt: string, userQuery: string): Promise<string>;
762
+ }
763
+ /**
764
+ * Create an Anthropic Claude AI adapter using raw fetch (no SDK).
765
+ *
766
+ * @param apiKey — Anthropic API key; callers typically read it from the ANTHROPIC_API_KEY env var.
767
+ * @param model — model identifier. Default: claude-sonnet-4-20250514.
768
+ * @param maxTokens — max tokens for the response. Default: 4096.
769
+ */
770
+ declare function createAnthropicAI(apiKey: string, model?: string, maxTokens?: number): RadiantAI;
771
+ /**
772
+ * Create a mock AI adapter for testing. Returns a fixed response
773
+ * without calling any API.
774
+ */
775
+ declare function createMockAI(fixedResponse: string): RadiantAI;
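Because `RadiantAI` is a single method, wrapping any backend takes a few lines. A hypothetical adapter sketch:

```ts
// Wraps an arbitrary completion function as a RadiantAI adapter.
function createCustomAI(call: (system: string, user: string) => Promise<string>): RadiantAI {
  return { complete: (systemPrompt, userQuery) => call(systemPrompt, userQuery) };
}

// Tests can use the shipped mock instead of a live API:
const testAI = createMockAI('{"patterns": []}');
```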
776
+
777
+ /**
778
+ * @neuroverseos/governance/radiant — scope resolution
779
+ *
780
+ * Parses scope strings like "aukiverse/posemesh" into typed scope objects
781
+ * that adapters (GitHub, etc.) know how to fetch.
782
+ */
783
+ /**
784
+ * A GitHub repository scope — the unit of activity Radiant reads.
785
+ */
786
+ interface RepoScope {
787
+ owner: string;
788
+ repo: string;
789
+ }
790
+ /**
791
+ * Parse a scope string into a RepoScope.
792
+ *
793
+ * Accepts:
794
+ * "owner/repo"
795
+ * "https://github.com/owner/repo"
796
+ * "github.com/owner/repo"
797
+ *
798
+ * Throws on unparseable input.
799
+ */
800
+ declare function parseRepoScope(scope: string): RepoScope;
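A sketch of the three accepted forms; the shipped parser may normalize differently:

```ts
function parseRepoScopeSketch(scope: string): RepoScope {
  const cleaned = scope.replace(/^https?:\/\//, '').replace(/^github\.com\//, '');
  const [owner, repo, ...rest] = cleaned.split('/');
  if (!owner || !repo || rest.length > 0) {
    throw new Error(`Unparseable scope: "${scope}" (expected owner/repo)`);
  }
  return { owner, repo };
}

parseRepoScopeSketch('https://github.com/aukiverse/posemesh'); // { owner: 'aukiverse', repo: 'posemesh' }
```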
801
+ /**
802
+ * Format a RepoScope back to a display string.
803
+ */
804
+ declare function formatScope(scope: RepoScope): string;
805
+
806
+ /**
807
+ * @neuroverseos/governance/radiant — GitHub activity adapter
808
+ *
809
+ * Fetches recent activity from a GitHub repository and maps it to
810
+ * Radiant's Event type. This is the data source for the behavioral
811
+ * analysis pipeline (signals → patterns → alignment scores).
812
+ *
813
+ * Uses raw fetch (no Octokit dependency). Requires a GitHub token
814
+ * with repo read access (fine-grained PAT or classic with `repo` scope).
815
+ *
816
+ * What it fetches:
817
+ * - Recent commits (with author, message, co-authors)
818
+ * - Recent pull requests (with description, author, state)
819
+ * - Recent PR reviews (with reviewer, state, body)
820
+ * - Recent issue/PR comments (with author, body, reference)
821
+ *
822
+ * Actor classification:
823
+ * - GitHub user type "Bot" → ActorKind 'bot'
824
+ * - Login ending in "[bot]" → ActorKind 'bot'
825
+ * - Co-authored-by trailers naming known AI tools → ActorKind 'ai'
826
+ * - Everything else → ActorKind 'human'
827
+ *
828
+ * Pagination: fetches one page per endpoint (up to perPage items).
829
+ * Sufficient for most repos' 14-day windows. Full pagination is a
830
+ * future refinement.
831
+ */
832
+
833
+ interface GitHubFetchOptions {
834
+ /** How many days of history to fetch. Default: 14. */
835
+ windowDays?: number;
836
+ /** Items per page per endpoint. Default: 100. */
837
+ perPage?: number;
838
+ }
839
+ /**
840
+ * Fetch recent activity from a GitHub repo and return Radiant Events.
841
+ *
842
+ * @param scope — owner/repo
843
+ * @param token — GitHub personal access token
844
+ * @param options — window size and pagination
845
+ */
846
+ declare function fetchGitHubActivity(scope: RepoScope, token: string, options?: GitHubFetchOptions): Promise<Event[]>;
847
+ /**
848
+ * Create a mock GitHub adapter for testing. Returns fixed events
849
+ * without calling the GitHub API.
850
+ */
851
+ declare function createMockGitHubAdapter(fixedEvents: Event[]): typeof fetchGitHubActivity;
852
+
853
+ /**
854
+ * @neuroverseos/governance/radiant — ExoCortex adapter
855
+ *
856
+ * Reads stated intent from an exocortex directory. This is the "should"
857
+ * side of the intent-vs-behavior comparison. GitHub provides the "did."
858
+ * Radiant reads both and surfaces where they diverge.
859
+ *
860
+ * The adapter reads standard exocortex files:
861
+ * - attention.md — what the person/team is focused on RIGHT NOW
862
+ * - goals.md — what they're working toward
863
+ * - identity.md — who they are, what they value
864
+ * - sprint.md or src/sprint.md — current sprint focus
865
+ * - org/organization.md — org mission and values (if symlinked)
866
+ * - org/methods.md — how the org operates (if symlinked)
867
+ *
868
+ * Returns a structured ExocortexContext that the AI interpretation
869
+ * prompt uses to compare stated intent against observed behavior.
870
+ *
871
+ * All file reads are optional — missing files are silently skipped.
872
+ * An exocortex with only attention.md is still useful.
873
+ */
874
+ interface ExocortexContext {
875
+ /** What the person/team says they're focused on right now. */
876
+ attention: string | null;
877
+ /** What they're working toward. */
878
+ goals: string | null;
879
+ /** Who they are, what they value. */
880
+ identity: string | null;
881
+ /** Current sprint focus. */
882
+ sprint: string | null;
883
+ /** Org mission and values (from org/organization.md). */
884
+ organization: string | null;
885
+ /** How the org operates (from org/methods.md). */
886
+ methods: string | null;
887
+ /** The directory path that was read. */
888
+ source: string;
889
+ /** How many files were found and loaded. */
890
+ filesLoaded: number;
891
+ }
892
+ /**
893
+ * Read stated intent from an exocortex directory.
894
+ *
895
+ * Silently skips missing files. Returns whatever it finds. An exocortex
896
+ * with only attention.md is still useful — partial context is better
897
+ * than no context.
898
+ */
899
+ declare function readExocortex(dirPath: string): ExocortexContext;
900
+ /**
901
+ * Format the exocortex context as a section for the AI interpretation
902
+ * prompt. Only includes fields that were actually loaded.
903
+ */
904
+ declare function formatExocortexForPrompt(ctx: ExocortexContext): string;
905
+ /**
906
+ * One-line summary of what was loaded, for CLI status output.
907
+ */
908
+ declare function summarizeExocortex(ctx: ExocortexContext): string;
909
+
910
+ /**
911
+ * @neuroverseos/governance/radiant — AI pattern interpretation
912
+ *
913
+ * Step 5 (reframed): the AI identifies patterns in the signal matrix
914
+ * and event stream, governed by the worldmodel's invariants and the
915
+ * rendering lens's voice constraints.
916
+ *
917
+ * Hybrid vocabulary:
918
+ * - canonical: patterns the worldmodel declares (the AI labels them
919
+ * when it sees matching evidence)
920
+ * - candidate: patterns the AI discovers that the worldmodel hasn't
921
+ * declared (surfaced as "emergent," tracked for worldmodel evolution)
922
+ *
923
+ * Guardrails (enforced by interpretPatterns):
924
+ * - Every pattern must cite specific signals and events (no hallucination)
925
+ * - Candidate patterns are explicitly labeled as not-yet-in-worldmodel
926
+ * - The AI prompt includes the lens's forbidden phrases + voice rules
927
+ * - Output is parsed + validated before being returned
928
+ *
929
+ * The AI does the thinking. The governance layer checks the work.
930
+ */
931
+
932
+ interface InterpretInput {
933
+ /** The 5×3 signal matrix from extractSignals. */
934
+ signals: readonly Signal[];
935
+ /** Classified events from the adapter. */
936
+ events: readonly ClassifiedEvent[];
937
+ /** Raw worldmodel content (markdown). */
938
+ worldmodelContent: string;
939
+ /** The active rendering lens. */
940
+ lens: RenderingLens;
941
+ /** AI adapter to call for interpretation. */
942
+ ai: RadiantAI;
943
+ /** Known canonical pattern names from the worldmodel (optional). */
944
+ canonicalPatterns?: readonly string[];
945
+ /** Stated intent from the exocortex (optional). When present, the AI
946
+ * compares stated intent against observed behavior and surfaces gaps. */
947
+ statedIntent?: string;
948
+ }
949
+ interface InterpretResult {
950
+ patterns: ObservedPattern[];
951
+     /** A strategic thesis paragraph (3–5 sentences) weaving patterns into one argument. */
952
+ meaning: string;
953
+     /** 1–3 direct imperatives, OR explicit acknowledgment that nothing needs action. */
954
+ move: string;
955
+ raw_ai_response: string;
956
+ }
957
+ /**
958
+ * Ask the AI to identify patterns in the signal matrix + event stream.
959
+ *
960
+ * The AI receives:
961
+ * - The signal matrix summary
962
+ * - A sample of recent events (capped for context length)
963
+ * - The worldmodel context
964
+ * - The lens's analytical frame (evaluation questions, voice rules)
965
+ * - A list of canonical pattern names (if any)
966
+ * - Instructions to produce structured JSON
967
+ *
968
+ * The response is parsed as JSON, validated, and each pattern is typed
969
+ * as canonical or candidate.
970
+ */
971
+ declare function interpretPatterns(input: InterpretInput): Promise<InterpretResult>;
972
+
973
+ /**
974
+ * @neuroverseos/governance/radiant — governance audit
975
+ *
976
+ * Runs each GitHub event through the NeuroverseOS guard engine against
977
+ * the compiled worldmodel. Produces an audit trail showing which events
978
+ * triggered governance (BLOCK/MODIFY), which side (human/AI/joint),
979
+ * and what invariants were tested.
980
+ *
981
+ * This is where Radiant meets the NeuroverseOS governance engine.
982
+ * Same evaluateGuard() that runs at API-level, applied retroactively
983
+ * to activity that already happened. The audit trail is the proof that
984
+ * the cocoon's walls are holding — or shows where they're being tested.
985
+ */
986
+
987
+ interface GovernanceVerdict {
988
+ eventId: string;
989
+ domain: ActorDomain;
990
+ status: 'ALLOW' | 'BLOCK' | 'MODIFY' | 'PAUSE' | 'PENALIZE' | 'REWARD';
991
+ reason?: string;
992
+ ruleId?: string;
993
+ warning?: string;
994
+ }
995
+ interface GovernanceAudit {
996
+ totalEvents: number;
997
+ human: {
998
+ allow: number;
999
+ modify: number;
1000
+ block: number;
1001
+ details: GovernanceVerdict[];
1002
+ };
1003
+ cyber: {
1004
+ allow: number;
1005
+ modify: number;
1006
+ block: number;
1007
+ details: GovernanceVerdict[];
1008
+ };
1009
+ joint: {
1010
+ allow: number;
1011
+ modify: number;
1012
+ block: number;
1013
+ details: GovernanceVerdict[];
1014
+ };
1015
+ /** Summary for rendering — most important findings. */
1016
+ summary: string;
1017
+ }
1018
+ /**
1019
+ * Run each classified event through the guard engine against the compiled
1020
+ * worldmodel. Returns a structured audit with human/cyber/joint breakdown.
1021
+ *
1022
+ * @param events — classified events from the GitHub adapter
1023
+ * @param worldPath — path to a compiled .nv-world.md or world directory
1024
+ */
1025
+ declare function auditGovernance(events: readonly ClassifiedEvent[], worldPath: string): Promise<GovernanceAudit>;
1026
+
1027
+ /**
1028
+ * @neuroverseos/governance/radiant — renderer
1029
+ *
1030
+ * Takes signals + patterns + scores + lens metadata and produces the
1031
+ * structured output Nils reads. Two output modes:
1032
+ * - text: the EMERGENT / MEANING / MOVE structure for terminal display
1033
+ * - yaml+text: Memory Palace coded read file (YAML frontmatter + prose)
1034
+ *
1035
+ * The renderer enforces the lens's voice rules: forbidden phrases are
1036
+ * checked, bucket names are never leaked, vocabulary is Auki-native.
1037
+ */
1038
+
1039
+ interface RenderInput {
1040
+ scope: RepoScope;
1041
+ windowDays: number;
1042
+ eventCount: number;
1043
+ signals: readonly Signal[];
1044
+ patterns: readonly ObservedPattern[];
1045
+ scores: {
1046
+ A_L: Score;
1047
+ A_C: Score;
1048
+ A_N: Score;
1049
+ R: Score;
1050
+ };
1051
+ lens: RenderingLens;
1052
+ /** AI-generated meaning + move sections (from the interpretation step). */
1053
+ meaning?: string;
1054
+ move?: string;
1055
+ /** Number of prior Radiant reads available (0 = first run). */
1056
+ priorReadCount?: number;
1057
+ /** Governance audit trail — events evaluated against the worldmodel. */
1058
+ governance?: GovernanceAudit;
1059
+ }
1060
+ interface RenderOutput {
1061
+ /** The human-readable text output for terminal display. */
1062
+ text: string;
1063
+ /** The Memory Palace coded YAML frontmatter (Tier 2 structured signals). */
1064
+ frontmatter: string;
1065
+ }
1066
+ declare function render(input: RenderInput): RenderOutput;
1067
+
1068
+ /**
1069
+ * @neuroverseos/governance/radiant — Memory Palace file operations
1070
+ *
1071
+ * Writes Radiant reads to the exocortex as dated markdown files (with
1072
+ * YAML frontmatter for structured signal data). Reads prior files to
1073
+ * detect pattern persistence across runs.
1074
+ *
1075
+ * The exocortex directory IS the Memory Palace. Files are the tiers:
1076
+ * - reads/YYYY-MM-DD.md = Tier 2 (structured signals) + Tier 3 (narrative)
1077
+ * - knowledge.md = accumulated pattern facts with persistence counts
1078
+ *
1079
+ * No database. The file system is the time series. Git is the versioning.
1080
+ */
1081
+ interface PriorRead {
1082
+ date: string;
1083
+ filename: string;
1084
+ /** Pattern names found in this read (parsed from frontmatter). */
1085
+ patternNames: string[];
1086
+ /** Raw frontmatter content for signal comparison. */
1087
+ frontmatter: string;
1088
+ }
1089
+ interface PatternPersistence {
1090
+ name: string;
1091
+ /** How many prior reads contained this pattern. */
1092
+ occurrences: number;
1093
+ /** Dates this pattern was observed. */
1094
+ dates: string[];
1095
+ }
1096
+ /**
1097
+ * Write a Radiant read to the exocortex. Creates the directory structure
1098
+ * if it doesn't exist.
1099
+ *
1100
+ * @param exocortexDir — root of the exocortex (e.g. ~/exocortex/)
1101
+ * @param frontmatter — YAML frontmatter (Tier 2 structured signals)
1102
+ * @param text — prose output (Tier 3 narrative)
1103
+ * @returns the path of the written file
1104
+ */
1105
+ declare function writeRead(exocortexDir: string, frontmatter: string, text: string): string;
1106
+ /**
1107
+ * Items from the worldmodel that Radiant tracks for subtraction proposals.
1108
+ */
1109
+ interface WorldmodelItem {
1110
+ type: 'invariant' | 'drift_behavior' | 'aligned_behavior' | 'decision_priority';
1111
+ name: string;
1112
+ }
1113
+ /**
1114
+ * Update the knowledge file with:
1115
+ * - Pattern persistence (what keeps recurring → consider adding)
1116
+ * - Subtraction proposals (what hasn't fired → consider removing)
1117
+ * - Active items (what recently triggered → keep)
1118
+ *
1119
+ * The leader reads this file and makes deliberate, rare, bidirectional
1120
+ * changes to the worldmodel. Radiant proposes; the human decides.
1121
+ */
1122
+ declare function updateKnowledge(exocortexDir: string, persistence: PatternPersistence[], options?: {
1123
+ /** Items declared in the worldmodel (invariants, drift behaviors, etc.) */
1124
+ declaredItems?: WorldmodelItem[];
1125
+ /** Item names that triggered governance in this read */
1126
+ triggeredItems?: string[];
1127
+ /** Total number of reads completed (for "hasn't fired in N reads" tracking) */
1128
+ totalReads?: number;
1129
+ }): string;
1130
+ /**
1131
+ * Load prior Radiant reads from the exocortex. Returns them sorted by
1132
+ * date (oldest first). Each read has its pattern names extracted from
1133
+ * the frontmatter.
1134
+ */
1135
+ declare function loadPriorReads(exocortexDir: string): PriorRead[];
1136
+ /**
1137
+ * Compute pattern persistence across prior reads + the current patterns.
1138
+ */
1139
+ declare function computePersistence(priorReads: PriorRead[], currentPatternNames: string[]): PatternPersistence[];
1140
+ /**
1141
+ * Format prior-read context for the AI interpretation prompt.
1142
+ * Tells the AI what patterns were seen in previous runs so it can
1143
+ * reference persistence.
1144
+ */
1145
+ declare function formatPriorReadsForPrompt(priorReads: PriorRead[]): string;
1146
+
1147
+ /**
1148
+ * @neuroverseos/governance/radiant — `think` command
1149
+ *
1150
+ * The first composable npm command: load worldmodels + a rendering lens,
1151
+ * send a query to the AI with the composed system prompt, check the
1152
+ * response for forbidden phrases, return the result.
1153
+ *
1154
+ * This is Stage A — the voice layer. No GitHub activity needed. No signal
1155
+ * extraction. No pattern interpretation. Just: worldmodel + lens + AI + query.
1156
+ *
1157
+ * Usage (programmatic — CLI wiring lands in a follow-on commit):
1158
+ *
1159
+ * import { think } from '@neuroverseos/governance/radiant';
1160
+ * const result = await think({
1161
+ * worldmodelContent: fs.readFileSync('./auki.worldmodel.md', 'utf-8'),
1162
+ * lensId: 'auki-builder',
1163
+ * query: 'Should we merge PR #247?',
1164
+ * ai: createAnthropicAI(process.env.ANTHROPIC_API_KEY!),
1165
+ * });
1166
+ * console.log(result.response);
1167
+ */
1168
+
1169
+ interface ThinkInput {
1170
+ /** Raw markdown content of the worldmodel(s). Concatenate multiple with a separator. */
1171
+ worldmodelContent: string;
1172
+ /** Rendering lens id. Must be registered in LENSES. */
1173
+ lensId: string;
1174
+ /** The user's query — any natural-language question or prompt. */
1175
+ query: string;
1176
+ /** AI adapter instance (Anthropic, OpenAI, mock, etc.). */
1177
+ ai: RadiantAI;
1178
+ }
1179
+ interface ThinkResult {
1180
+ /** The AI's response text, after voice-check. */
1181
+ response: string;
1182
+ /** The lens that was applied. */
1183
+ lens: string;
1184
+ /** Any forbidden-phrase violations detected in the response. */
1185
+ voiceViolations: VoiceViolation[];
1186
+ /** Whether the response passed the voice check (no violations). */
1187
+ voiceClean: boolean;
1188
+ /** The system prompt that was composed (for transparency / debugging). */
1189
+ systemPrompt: string;
1190
+ }
1191
+ /**
1192
+ * Think through a query using the loaded worldmodel + rendering lens.
1193
+ *
1194
+ * The function:
1195
+ * 1. Resolves the lens by id (fails if not registered)
1196
+ * 2. Composes the system prompt from worldmodel content + lens
1197
+ * 3. Calls the AI adapter with system prompt + user query
1198
+ * 4. Checks the response for forbidden phrases
1199
+ * 5. Returns the response + voice-check results
1200
+ *
1201
+ * The caller decides what to do with voice violations — surface them,
1202
+ * retry, or accept the response as-is. The `think` command itself does
1203
+ * not retry or modify the response.
1204
+ */
1205
+ declare function think(input: ThinkInput): Promise<ThinkResult>;
1206
+
1207
+ /**
1208
+ * @neuroverseos/governance/radiant — `emergent` command
1209
+ *
1210
+ * The behavioral analysis command. Reads GitHub activity for a repo,
1211
+ * classifies events, extracts signals, asks the AI to interpret patterns,
1212
+ * computes L/C/N/R alignment scores, applies the rendering lens, and
1213
+ * produces the EMERGENT / MEANING / MOVE output.
1214
+ *
1215
+ * This is the command that produces the output Nils reads.
1216
+ *
1217
+ * Usage (programmatic):
1218
+ * import { emergent } from '@neuroverseos/governance/radiant';
1219
+ * const result = await emergent({ scope, token, ... });
1220
+ * console.log(result.text);
1221
+ *
1222
+ * Usage (CLI — wired in cli/radiant.ts):
1223
+ * neuroverse radiant emergent aukiverse/posemesh --lens auki-builder
1224
+ */
1225
+
1226
+ interface EmergentInput {
1227
+ scope: RepoScope;
1228
+ githubToken: string;
1229
+ worldmodelContent: string;
1230
+ lensId: string;
1231
+ ai: RadiantAI;
1232
+ windowDays?: number;
1233
+ canonicalPatterns?: readonly string[];
1234
+ /** Path to an exocortex directory. When present, Radiant reads stated
1235
+ * intent (attention, goals, sprint) and compares against observed
1236
+ * behavior from GitHub. The gap is the most valuable signal. */
1237
+ exocortexPath?: string;
1238
+ /** Path to a compiled world directory (for governance audit).
1239
+ * When present, each event is evaluated through evaluateGuard
1240
+ * and the GOVERNANCE section appears in the output. */
1241
+ worldPath?: string;
1242
+ }
1243
+ interface EmergentResult {
1244
+ /** The rendered text output (EMERGENT / MEANING / MOVE). */
1245
+ text: string;
1246
+ /** The YAML frontmatter for Memory Palace coding. */
1247
+ frontmatter: string;
1248
+ /** Voice violations detected in AI output. */
1249
+ voiceViolations: VoiceViolation[];
1250
+ voiceClean: boolean;
1251
+ /** Raw signal matrix for inspection. */
1252
+ signals: readonly Signal[];
1253
+ /** Raw scores for inspection. */
1254
+ scores: {
1255
+ A_L: Score;
1256
+ A_C: Score;
1257
+ A_N: Score;
1258
+ R: Score;
1259
+ };
1260
+ /** Event count fetched from GitHub. */
1261
+ eventCount: number;
1262
+ }
1263
+ declare function emergent(input: EmergentInput): Promise<EmergentResult>;
1264
+
1265
+ /**
1266
+ * @neuroverseos/governance/radiant
1267
+ *
1268
+ * Radiant — Behavioral Intelligence for Collaboration Systems.
1269
+ *
1270
+ * ExoCortex remembers what happened. Radiant understands what it means —
1271
+ * relative to your culture and strategy — and tells you what to do next.
1272
+ *
1273
+ * This module consumes the existing worldmodel pipeline (parse → compile),
1274
+ * guard engine, lens system, and signal schema exported from
1275
+ * `@neuroverseos/governance`, and layers on top:
1276
+ *
1277
+ * - L/C/N math (Life / Cyber gyroscopes inside the NeuroverseOS universe)
1278
+ * - actor_domain classification (life | cyber | joint)
1279
+ * - 5 signals × 3 domains = 15 behavioral values
1280
+ * - 5 named pattern compositions
1281
+ * - Rendering lens layer (auki-builder first) that transforms patterns
1282
+ * before the renderer sees them — deterministic shaping, no LLM in path
1283
+ * - Stateless commands: emergent, decision
1284
+ * - Stateful (via MemoryProvider) commands: drift, evolve
1285
+ * - Memory Palace 4-layer coding standard (compression / baselines /
1286
+ * knowledge / synthesis) with a SQLite reference implementation
1287
+ * - CLI entry (bin/radiant.ts) and MCP server entry (bin/radiant-mcp.ts)
1288
+ *
1289
+ * Build state: Phase 1 complete — voice layer, behavioral dashboard,
1290
+ * MCP server, Memory Palace write-back, governance audit, ExoCortex
1291
+ * handshake. See radiant/PROJECT-PLAN.md for the full roadmap.
1292
+ *
1293
+ * Usage:
1294
+ * import {
1295
+ * think, emergent, scoreLife, classifyActorDomain,
1296
+ * aukiBuilderLens, checkForbiddenPhrases,
1297
+ * } from '@neuroverseos/governance/radiant';
1298
+ */
1299
+ declare const RADIANT_PACKAGE_VERSION = "0.0.0";
1300
+
1301
+ export { type Actor, type ActorDomain, type ActorKind, type AlignmentStatus, type BridgingComponent, type BridgingComponentScore, type ClassifiedEvent, type CyberCapability, type CyberDimension, DEFAULT_EVIDENCE_GATE, DEFAULT_SIGNAL_EXTRACTORS, type EmergentInput, type EmergentResult, type Event, type EventReference, type EvidenceGate, type ExemplarRef, type ExocortexContext, type ExtractionResult, type GitHubFetchOptions, type GovernanceAudit, type GovernanceVerdict, type InterpretInput, type InterpretResult, LENSES, type LensVocabulary, type LifeCapability, type LifeDimension, type ObservedPattern, type OverlapDef, type PatternEvidence, type PatternPersistence, type PrimaryFrame, type PriorRead, RADIANT_PACKAGE_VERSION, type RadiantAI, type RenderInput, type RenderOutput, type RenderingLens, type RepoScope, type Score, type ScoreSentinel, type ScoredObservation, type Signal, type SignalExtractor, type SignalMatrix, type ThinkInput, type ThinkResult, type VoiceDirectives, type VoiceViolation, type WorldmodelItem, auditGovernance, aukiBuilderLens, checkForbiddenPhrases, classifyActorDomain, classifyEvents, composeSystemPrompt, computePersistence, createAnthropicAI, createMockAI, createMockGitHubAdapter, emergent, extractSignals, fetchGitHubActivity, formatExocortexForPrompt, formatPriorReadsForPrompt, formatScope, getLens, interpretPatterns, isPresent, isScored, isSentinel, listLenses, loadPriorReads, parseRepoScope, presenceAverage, readExocortex, render, scoreComposite, scoreCyber, scoreLife, scoreNeuroVerse, summarizeExocortex, think, updateKnowledge, writeRead };