@neuroverseos/governance 0.6.0 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1700 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/radiant/index.ts
// Public package surface. Every export is wired as a live getter via
// __export, so module.exports reflects the bindings defined later in
// this bundle (lazy resolution, no load-order hazards).
var radiant_exports = {};
__export(radiant_exports, {
  DEFAULT_EVIDENCE_GATE: () => DEFAULT_EVIDENCE_GATE,
  DEFAULT_SIGNAL_EXTRACTORS: () => DEFAULT_SIGNAL_EXTRACTORS,
  LENSES: () => LENSES,
  RADIANT_PACKAGE_VERSION: () => RADIANT_PACKAGE_VERSION,
  aukiBuilderLens: () => aukiBuilderLens,
  checkForbiddenPhrases: () => checkForbiddenPhrases,
  classifyActorDomain: () => classifyActorDomain,
  classifyEvents: () => classifyEvents,
  composeSystemPrompt: () => composeSystemPrompt,
  createAnthropicAI: () => createAnthropicAI,
  createMockAI: () => createMockAI,
  createMockGitHubAdapter: () => createMockGitHubAdapter,
  emergent: () => emergent,
  extractSignals: () => extractSignals,
  fetchGitHubActivity: () => fetchGitHubActivity,
  formatExocortexForPrompt: () => formatExocortexForPrompt,
  formatScope: () => formatScope,
  getLens: () => getLens,
  interpretPatterns: () => interpretPatterns,
  isPresent: () => isPresent,
  isScored: () => isScored,
  isSentinel: () => isSentinel,
  listLenses: () => listLenses,
  parseRepoScope: () => parseRepoScope,
  presenceAverage: () => presenceAverage,
  readExocortex: () => readExocortex,
  render: () => render,
  scoreComposite: () => scoreComposite,
  scoreCyber: () => scoreCyber,
  scoreLife: () => scoreLife,
  scoreNeuroVerse: () => scoreNeuroVerse,
  summarizeExocortex: () => summarizeExocortex,
  think: () => think
});
module.exports = __toCommonJS(radiant_exports);
58
+
59
// src/radiant/types.ts

// Default evidence gate: a signal must have at least `k` events and a
// confidence of at least `c` before it is allowed to count as present.
var DEFAULT_EVIDENCE_GATE = { k: 3, c: 0.5 };

// A score value is either a number (scored) ...
function isScored(s) {
  return typeof s === "number";
}

// ... or a string sentinel such as "INSUFFICIENT_EVIDENCE"/"UNAVAILABLE".
function isSentinel(s) {
  return typeof s === "string";
}

// src/radiant/core/math.ts

// Does this observation clear the evidence gate?
function isPresent(o, gate = DEFAULT_EVIDENCE_GATE) {
  return o.eventCount >= gate.k && o.confidence >= gate.c;
}

// Mean score across the items that clear the gate; returns the
// "INSUFFICIENT_EVIDENCE" sentinel when nothing does.
function presenceAverage(items, gate = DEFAULT_EVIDENCE_GATE) {
  let total = 0;
  let presentCount = 0;
  for (const item of items) {
    if (!isPresent(item, gate)) continue;
    total += item.score;
    presentCount += 1;
  }
  if (presentCount === 0) return "INSUFFICIENT_EVIDENCE";
  return total / presentCount;
}

// Life-side capability score: presence-based average of its dimensions.
function scoreLife(capability, gate = DEFAULT_EVIDENCE_GATE) {
  return presenceAverage(capability.dimensions, gate);
}

// Cyber-side capability score: same presence-based averaging.
function scoreCyber(capability, gate = DEFAULT_EVIDENCE_GATE) {
  return presenceAverage(capability.dimensions, gate);
}

// NeuroVerse score; "UNAVAILABLE" until a worldmodel has been loaded.
function scoreNeuroVerse(components, worldmodelLoaded, gate = DEFAULT_EVIDENCE_GATE) {
  if (!worldmodelLoaded) return "UNAVAILABLE";
  return presenceAverage(components, gate);
}

// Composite of the three axis scores, averaging only the numeric ones;
// sentinel axes (insufficient/unavailable) are excluded rather than
// treated as zero.
function scoreComposite(a_L, a_C, a_N) {
  const scored = [a_L, a_C, a_N].filter((s) => isScored(s));
  if (scored.length === 0) return "INSUFFICIENT_EVIDENCE";
  let total = 0;
  for (const s of scored) total += s;
  return total / scored.length;
}
96
+
97
// src/radiant/core/domain.ts

// "Life" side of the boundary: humans and unattributed actors.
function isLifeSide(k) {
  return k === "human" || k === "unknown";
}

// "Cyber" side of the boundary: AI agents and bots.
function isCyberSide(k) {
  return k === "ai" || k === "bot";
}

// True when the two actor kinds sit on opposite sides of the boundary.
function crossesBoundary(a, b) {
  if (isLifeSide(a)) return isCyberSide(b);
  if (isCyberSide(a)) return isLifeSide(b);
  return false;
}

/**
 * Classify which side of the life/cyber boundary an event belongs to.
 * "joint" when the actor set mixes both sides, or when the event replies
 * across the boundary; otherwise the domain follows the primary actor
 * (non-cyber kinds, including "unknown", land on the life side).
 */
function classifyActorDomain(event) {
  const primary = event.actor.kind;
  const kinds = [primary];
  for (const co of event.coActors ?? []) kinds.push(co.kind);
  if (kinds.some(isLifeSide) && kinds.some(isCyberSide)) {
    return "joint";
  }
  const parent = event.respondsTo;
  if (parent && crossesBoundary(primary, parent.actor.kind)) {
    return "joint";
  }
  return isCyberSide(primary) ? "cyber" : "life";
}
121
+
122
// src/radiant/lenses/auki-builder.ts

// Internal analytical scaffold for the auki-builder lens: three reasoning
// domains, the emergent state at each pairwise overlap, and the center
// identity when all three integrate. Bucket names are reasoning-only;
// rendered output speaks in the domain_skills vocabulary below.
var AUKI_VANGUARD_FRAME = {
  domains: [
    "future-foresight",
    "narrative-dynamics",
    "shared-prosperity"
  ],
  // Pairwise overlaps; the emergent_state names (Inspiration, Trust, Hope)
  // ARE reader-facing and may surface freely in output.
  overlaps: [
    {
      domains: ["future-foresight", "narrative-dynamics"],
      emergent_state: "Inspiration",
      description: "Visionary leaders inspire action by painting a vivid picture of a better future, helping people understand how to get there together. Emerges when long-range thinking meets language that rallies."
    },
    {
      domains: ["narrative-dynamics", "shared-prosperity"],
      emergent_state: "Trust",
      description: "Built through authentic storytelling and consistent delivery on promises, creating a community where contributors feel secure in their contributions. Emerges when clear intent meets fair distribution \u2014 coalitions form here."
    },
    {
      domains: ["shared-prosperity", "future-foresight"],
      emergent_state: "Hope",
      description: "Propels decentralized communities toward a collective future where resources are equitably distributed and success is shared by all. Emerges when long-term infrastructure is architected for collective benefit \u2014 the DePIN / Intercognitive posture."
    }
  ],
  center_identity: "Collective Vanguard Leader",
  // Questions the AI reasons through, in order, when evaluating activity.
  evaluation_questions: [
    "What long-range architectural thinking is present? Systems design, scenario planning, critical thinking, ethical judgment \u2014 which of these is visible, which is weak?",
    "What communication and meaning-making is happening? Storytelling, cultural sensitivity, audience engagement, persuasive writing \u2014 who is telling the story of how the pieces connect?",
    "What collaborative and fairness work is happening? Stakeholder management, partnership development, incentive alignment, community building \u2014 who is building coalitions and making sure value flows equitably?",
    "Which overlap states surface \u2014 Inspiration (vision + narrative), Trust (narrative + fairness), Hope (fairness + long-term thinking)?",
    "Is the integration complete (Collective Vanguard Leader manifests across all three dimensions) or is one dimension absent / weak?",
    "If one dimension is weak, what specific skill inside it is the lowest-friction activation point?"
  ],
  scoring_rubric: `For any Auki activity, identify which specific skills are strongly present, which are weak, which are absent. Cite specific evidence for each. Name the overlap emergent states that surface using their plain-English names (Inspiration, Trust, Hope). Do not surface the bucket names (Future Foresight, Narrative Dynamics, Shared Prosperity) in the output \u2014 those are internal reasoning scaffolds, not reader-facing labels. Translate bucket-level findings into skill-level observations: not "Future Foresight is present" but "the architectural thinking is strong \u2014 the systems design is clear"; not "Shared Prosperity is weak" but "partnership development is missing" or "incentive alignment hasn't been established." Center identity (Collective Vanguard Leader) may be named sparingly, only when all three dimensions fully integrate.`,
  /**
   * The skills inside each domain. These are the OUTPUT-FACING vocabulary —
   * the observable behaviors and capabilities readers understand. When the
   * AI renders findings, it uses these skill names, not the bucket names.
   *
   * From Kirsten's original vanguard diagram (see exemplars/vanguard-diagram).
   */
  domain_skills: {
    "future-foresight": [
      "strategic thinking",
      "systems design",
      "scenario planning",
      "futurism and trend analysis",
      "critical thinking",
      "innovative problem-solving",
      "data-driven decision-making",
      "ethical judgment and governance",
      "risk assessment and mitigation",
      "curiosity and open-mindedness"
    ],
    "narrative-dynamics": [
      "storytelling and narrative crafting",
      "behavioral psychology and memetics",
      "emotional intelligence",
      "communication and presentation skills",
      "cultural sensitivity and adaptation",
      "social media and viral messaging strategy",
      "brand building and positioning",
      "persuasive writing",
      "visualization and design thinking",
      "audience analysis and engagement"
    ],
    "shared-prosperity": [
      "stakeholder management",
      "collaborative leadership",
      "conflict resolution and mediation",
      "economic and tokenomic design",
      "incentive alignment",
      "community building and management",
      "inclusivity and equity advocacy",
      "partnership development",
      "transparency and accountability",
      "negotiation and diplomacy"
    ]
  },
  /**
   * The translation rule: bucket names stay internal; skills + overlap
   * state names surface in output. This is enforced by both the
   * output-directive (guidance to the AI) and the forbidden-phrases list
   * (renderer-level rejection of any output leaking bucket names).
   */
  output_translation: {
    never_surface_in_output: [
      "Future Foresight",
      "Narrative Dynamics",
      "Shared Prosperity"
    ],
    surface_freely: [
      "Inspiration",
      "Trust",
      "Hope"
      // plus any specific skill name from domain_skills above
    ],
    surface_sparingly: ["Collective Vanguard Leader"],
    translation_examples: [
      {
        internal_reasoning: "Future Foresight is strong",
        external_expression: "the architectural thinking is strong; the systems design is clear"
      },
      {
        internal_reasoning: "Shared Prosperity is weak",
        external_expression: "partnership development is missing; no one has established incentive alignment across teams"
      },
      {
        internal_reasoning: "Narrative Dynamics is absent",
        external_expression: "no one is telling the story of how these pieces connect; the audience does not see the shared vision yet"
      }
    ]
  }
};

// Auki-native vocabulary surfaced into the prompt: proper nouns used
// literally, generic-to-native term substitutions, term banks by topic,
// and system-jargon -> plain-English translations for reader-facing text.
var AUKI_VOCABULARY = {
  proper_nouns: [
    "$AUKI",
    "Posemesh",
    "Auki Labs",
    "Posemesh Foundation",
    "Intercognitive Foundation",
    "Intercognitive",
    "Sixth Protocol",
    "Fifth Protocol",
    "DePIN",
    "Cactus",
    "Terri",
    "Mech Jagger",
    "peaq",
    "Mawari",
    "GEODNET",
    "Nine Pillars of AI Accessibility",
    "the real world web",
    "the posemesh"
  ],
  // Generic term → Auki-native replacement
  preferred: {
    device: "participant",
    client: "participant",
    "coordinate system": "domain",
    "QR code for calibration": "portal",
    "work request": "task",
    "location alignment": "calibrate",
    "sensor reading": "observation",
    "physical environment": "environment",
    "the network (public-facing)": "the real world web",
    "the network (technical)": "the posemesh",
    "coordination between devices": "spatial orchestration",
    "buying services": "burning tokens for credits",
    "full autonomy": "the full stack",
    "non-GPS environments": "GPS-denied environments",
    "our partners": "the Intercognitive coalition (Auki, peaq, Mawari, GEODNET)"
  },
  // Technical/architecture term bank.
  architecture: [
    "domain",
    "domain cluster",
    "domain manager",
    "domain owner",
    "semantic layer",
    "topography layer",
    "rendering layer",
    "partitions",
    "observations",
    "portals",
    "participant",
    "supply participant",
    "demand participant",
    "capabilities",
    "tasks",
    "discovery service",
    "DHT",
    "substrate",
    "spatial orchestration",
    "app-free navigation",
    "marker-free VPS",
    "spatially aware",
    "the stack",
    "the robotics stack",
    "GPS-denied",
    "locomotion",
    "manipulation",
    "spatio-semantic perception",
    "mapping",
    "positioning",
    "hybrid robotics",
    "AI copilot",
    "shared spatial layer"
  ],
  // Token-economy term bank.
  economic: [
    "burn",
    "credit",
    "deflationary mint",
    "reputation",
    "vacancy",
    "treasury",
    "utilization rate",
    "initial supply",
    "total supply",
    "organization",
    "trustless",
    "peer-to-peer transactions",
    "machine passports",
    "machine economy"
  ],
  // Mission/positioning framing term bank.
  framing: [
    "machine perception",
    "spatial computing",
    "collaborative perception",
    "cognitive liberty",
    "perception-first",
    "protocol-not-product",
    "sovereignty",
    "decentralization",
    "territory capture",
    "foundations-before-execution",
    "make the world machine-readable",
    "connective tissue between digital and physical",
    "open, permissionless, interoperable, private",
    "skip the bottleneck, ship the leverage",
    "coalition before standard",
    "hybrid over pure",
    "augmentation without surveillance",
    "civilization-scale infrastructure",
    "public good, not proprietary asset",
    "Inspiration",
    "Trust",
    "Hope",
    "Collective Vanguard Leader"
  ],
  // System-internal concepts → plain English for output.
  // Readers don't know Radiant's vocabulary. Speaking it to them is jargon.
  jargon_translations: {
    "worldmodel": "your strategy file",
    "canonical pattern": "something Radiant tracks by name over time",
    "candidate pattern": "something Radiant noticed but hasn't been told to watch for",
    "evidence gate": "how much activity Radiant needs before it speaks",
    "invariant": "a rule you declared non-negotiable",
    "signal extraction": "reading the activity",
    "alignment score": "how aligned the work is with what you said matters",
    "actor domain": "who did the work (a person, an AI, or both together)",
    "presence-based averaging": "only counts what actually happened",
    "drift detection": "noticing when things are shifting from what you said you wanted",
    "lens rewrite": "framing adjustment before output",
    "INSUFFICIENT_EVIDENCE": "not enough to say confidently",
    "UNAVAILABLE": "we can't measure this yet"
  }
};

// Voice contract the renderer enforces on output.
var AUKI_VOICE = {
  register: 'diagnosis mode \u2014 compressed, strategic, builder-direct. Closer to the closing paragraph of an Auki year-recap ("2025 was foundations. 2026 is execution.") than to its month-by-month celebration.',
  active_voice: "required",
  specificity: "required",
  hype_vocabulary: "forbidden",
  hedging: "forbidden",
  playfulness: "rare",
  close_with_strategic_frame: "preferred",
  punchline_move: "sparing",
  honesty_about_failure: "required",
  output_translation: `Reason internally through the three-domain frame (Future Foresight, Narrative Dynamics, Shared Prosperity) \u2014 that is the analytical scaffold. Express findings externally in the skills vocabulary INSIDE each domain (e.g. "strategic thinking," "partnership development," "storytelling," "incentive alignment"). Use the overlap state names (Inspiration, Trust, Hope) as plain-English emergent feelings. Do NOT surface the bucket names themselves (Future Foresight, Narrative Dynamics, Shared Prosperity) as labels in output \u2014 they are the model-maker's scaffold, not reader vocabulary. Readers understand skills, not buckets. The bucket names are in the forbidden_phrases list; the renderer will fail output that leaks them. Collective Vanguard Leader may be named sparingly when all three dimensions are fully integrated.`
};

// Phrases the renderer rejects outright, grouped by why they are banned:
// leaked bucket names, AI-assistant hedging, marketing filler, and
// generic corporate motion words.
var AUKI_FORBIDDEN_PHRASES = Object.freeze([
  // Domain bucket names — never surface to readers; translate to skills
  "future foresight",
  "narrative dynamics",
  "shared prosperity",
  // AI-assistant hedging
  "it may be beneficial to consider",
  "there appears to be",
  "one possible interpretation",
  "it might be worth exploring",
  "it might be worth considering",
  "consider whether",
  "it is worth noting",
  "please note that",
  "it should be noted",
  "in conclusion",
  // Corporate / marketing
  "unparalleled",
  "best-in-class",
  "industry-leading",
  "revolutionary",
  "cutting-edge",
  "state-of-the-art",
  "thrilled to announce",
  "excited to share",
  "game-changing",
  "synergy",
  "synergies",
  "stakeholders",
  // too corporate; prefer named actors
  "end-users",
  "value proposition",
  "paradigm shift",
  // Generic motion
  "going forward",
  "moving forward",
  "at the end of the day",
  "touching base",
  "circle back",
  "deep dive",
  "level set",
  "low-hanging fruit"
]);

// Sentence shapes the renderer prefers; bracketed slots get filled with
// specific skills, named evidence, and concrete actors.
var AUKI_PREFERRED_PATTERNS = Object.freeze([
  // Direct declarative observation
  "[Specific skill] is strong here. [Named evidence].",
  "[Specific skill] is breaking here. [Named evidence].",
  "[Specific skill] is missing. [Named consequence].",
  // Skills-level diagnosis (replaces the bucket-speak pattern)
  "The [specific skill] is clear \u2014 [specific evidence]. But [another specific skill] is missing \u2014 [specific effect]. [Imperative move].",
  "What is missing is [specific skill], not effort.",
  "[Trust | Inspiration | Hope] won't emerge until [skill-A] and [skill-B] happen together.",
  // Imperative move
  "Force [action] or [consequence].",
  "Tighten this or it fragments.",
  "Skip the bottleneck, ship the leverage.",
  "Coalition before standard.",
  // Strategic close — list-becomes-argument (from year-recap)
  "Combine [A, B, C] and suddenly [strategic implication].",
  "[Phase A] was [what you built]. [Phase B] is [what you execute].",
  // Binary stakes (from Intercognitive)
  "[Centralize X in the hands of a few] or [build a decentralized alternative].",
  // Short thesis compression (from glossary)
  "[Subject] is [essential-function] \u2014 [one-line precision].",
  // Named specificity
  "[Named partner/place/number] is the one that matters here.",
  // Honest texture (from year-recap)
  "[Specific thing] is not yet [state] \u2014 [honest qualifier].",
  // Pivot to reality before solution (from hybrid robotics)
  "The truth is [current reality]. [Better approach] is [the move].",
  // Overlap-state compression (surfacing the emergent feel, not the buckets)
  "Trust is not emerging because [specific narrative skill] and [specific coalition skill] are not happening together.",
  "Inspiration is landing here \u2014 [specific evidence of vision + rally].",
  "Hope is present \u2014 [specific evidence of long-term thinking meeting fair distribution]."
]);

// How this organization resolves tradeoffs; consulted when the output
// recommends action.
var AUKI_STRATEGIC_PATTERNS = Object.freeze([
  "Skip the bottleneck, ship the leverage \u2014 identify the hard layers in the stack, build on the layers that deliver value now.",
  "Coalition before standard \u2014 form the group that will set the rules before the rules need to exist.",
  "Foundations before execution \u2014 build the infrastructure that makes the thing possible; then scale.",
  "Hybrid over pure \u2014 augment humans with AI where full autonomy is not ready; transition later.",
  "Decentralized > proprietary \u2014 when choosing architecture, prefer open / community-governed / interoperable over closed / owned / siloed.",
  "Layered analysis first, strategic move second \u2014 decompose before deciding.",
  'Named specificity over abstractions \u2014 cite people, places, partners, numbers; never "stakeholders" or "the industry."',
  "Community deployment before public release \u2014 validate with a small group of operators before opening the door.",
  "Cognitive liberty as inviolable constraint \u2014 block any move that violates sovereignty over spatial/sensor data, regardless of other benefits.",
  "Compress mission to one sentence \u2014 one memorable thesis carries more weight than a manifesto."
]);

// Reference documents the lens calibrates output against. Each entry
// records which domains the exemplar exhibits and how completely they
// integrate, plus usage notes for the renderer.
var AUKI_EXEMPLARS = Object.freeze([
  {
    path: "intercognitive-foundation.md",
    title: "The Intercognitive Foundation",
    exhibits: ["future-foresight", "narrative-dynamics", "shared-prosperity"],
    integration_quality: "full \u2014 all three domains integrated; Collective Vanguard Leader manifests through the coalition itself",
    notes: 'The perfect vanguard exemplar. Future Foresight: inflection-point framing, Nine Pillars architecture. Narrative Dynamics: "the physical world cannot remain a blind spot," rally language, invitation to join. Shared Prosperity: coalition of four founding members, "no single entity should own," community governance, public good framing. When Radiant outputs something that feels vanguard-complete, it should resemble this in structure and tone.'
  },
  {
    path: "hybrid-robotics-essay.md",
    title: "The Case for Hybrid Robotics",
    exhibits: ["future-foresight", "shared-prosperity"],
    integration_quality: "partial \u2014 Future Foresight dominant, Shared Prosperity secondary, Narrative Dynamics present but informing rather than rallying. Overlap: Hope emerges (long-horizon infrastructure for collective benefit).",
    notes: 'Auki teaching how it thinks. The stack-analysis \u2192 bottleneck-identification \u2192 skip-and-ship pattern is a reusable Auki reasoning move. When the AI applies "systems-first" and "leverage-oriented" thinking, it should resemble this essay \u2014 structured, honest about current reality, pivoting to a better approach via layered reasoning.'
  },
  {
    path: "glossary.md",
    title: "Auki Glossary",
    exhibits: ["future-foresight"],
    integration_quality: 'primary-dominant \u2014 Future Foresight dominant (precise technical definitions as long-range conceptual infrastructure). Shared Prosperity implicit (glossary is open, cross-referenced, serves the ecosystem). Narrative Dynamics flashes once ("a mesh of machines reasoning about pose") but is not primary.',
    notes: 'Source of the vocabulary map. Also teaches compression style: one-line precision definitions, cross-reference density, occasional poetic compression. When the renderer produces short thesis sentences, aim for the "mesh of machines reasoning about pose" level of compression.'
  },
  {
    path: "year-recap-2025.md",
    title: "Auki 2025 Year-End Recap",
    exhibits: ["narrative-dynamics", "shared-prosperity"],
    integration_quality: "partial \u2014 Narrative Dynamics dominant, Shared Prosperity strong, Future Foresight arrives only in the closing paragraph. Overlap: Trust emerges (stakeholders can see their place in the collective progress).",
    notes: 'The celebration register \u2014 warm, specific, named. Not the diagnosis register the lens primarily enforces, but the same DNA. Use this exemplar when calibrating how Auki names specifics (Pepito in Bali, Mika Haak at HQ, the HK web3 robotics cabal) and how the "\u2014 literally" punchline move lands. Do NOT mimic the celebration warmth in diagnosis outputs.'
  }
]);
498
/**
 * Lens rewrite hook: annotate an interpreted pattern with framing,
 * emphasis, and a compression flag before rendering. Priority order:
 * invariant citations first, then not-yet-canonical candidates,
 * otherwise the system-level default.
 */
function aukiBuilderRewrite(pattern) {
  let framing = "system-level consequence";
  let emphasis = "coordination + leverage";
  if (pattern.evidence.cited_invariant) {
    framing = "invariant pressure";
    emphasis = "worldmodel invariant cited by this observation \u2014 surface the cross-reference";
  } else if (pattern.type === "candidate") {
    framing = "emergent observation (not yet in worldmodel)";
    emphasis = "candidate pattern \u2014 surface the vanguard-domain analysis (which domain activated this?)";
  }
  // All rewrites request compression; the original pattern fields pass
  // through untouched except for the three annotations.
  return { ...pattern, framing, emphasis, compress: true };
}
522
// The auki-builder lens: wires the vanguard frame, vocabulary, voice
// contract, phrase constraints, exemplars, and rewrite hook into the
// single lens object consumed by the prompt composer and renderer.
var aukiBuilderLens = {
  name: "auki-builder",
  description: "Renders behavioral interpretation through the vanguard leadership model \u2014 Future Foresight, Narrative Dynamics, Shared Prosperity. Role-based, not personal. Encodes how Auki-grade builders think and speak when the vanguard model is running. Companion to auki-vanguard.worldmodel.md (the abstract DNA) and the exemplars at src/radiant/examples/auki/exemplars/ (worked implementations).",
  // Analytical frame surfaced in the "How to Think" prompt section.
  primary_frame: {
    domains: AUKI_VANGUARD_FRAME.domains,
    overlaps: AUKI_VANGUARD_FRAME.overlaps,
    center_identity: AUKI_VANGUARD_FRAME.center_identity,
    evaluation_questions: AUKI_VANGUARD_FRAME.evaluation_questions,
    scoring_rubric: AUKI_VANGUARD_FRAME.scoring_rubric
  },
  vocabulary: AUKI_VOCABULARY,
  voice: AUKI_VOICE,
  forbidden_phrases: AUKI_FORBIDDEN_PHRASES,
  preferred_patterns: AUKI_PREFERRED_PATTERNS,
  strategic_patterns: AUKI_STRATEGIC_PATTERNS,
  exemplar_refs: AUKI_EXEMPLARS,
  // Pattern-level framing adjustment applied before output.
  rewrite: aukiBuilderRewrite
};
540
+
541
// src/radiant/lenses/index.ts

// Registry of built-in lenses, keyed by lens id.
var LENSES = Object.freeze({
  "auki-builder": aukiBuilderLens
});

// Look up a lens by id; returns undefined for unknown ids.
function getLens(id) {
  return LENSES[id];
}

// Frozen array of the registered lens ids.
function listLenses() {
  return Object.freeze(Object.keys(LENSES));
}
551
+
552
// src/radiant/core/signals.ts

/** Tag each raw event with the actor domain it belongs to. */
function classifyEvents(events) {
  const classified = [];
  for (const event of events) {
    classified.push({ event, domain: classifyActorDomain(event) });
  }
  return classified;
}

/**
 * Run every extractor across the three actor domains and collect one
 * signal observation per (extractor, domain) pair, in extractor order.
 */
function extractSignals(events, extractors = DEFAULT_SIGNAL_EXTRACTORS) {
  const DOMAINS = ["life", "cyber", "joint"];
  return extractors.flatMap((extractor) =>
    DOMAINS.map((domain) => {
      const { score, eventCount, confidence } = extractor.extract(events, domain);
      return { id: extractor.id, domain, score, eventCount, confidence };
    })
  );
}
576
// Shared helpers for the signal extractors below.

// Observation returned when a domain contains no events at all.
var ZERO = { score: 0, eventCount: 0, confidence: 0 };

// Restrict classified events to a single actor domain.
function inDomain(events, domain) {
  return events.filter((e) => e.domain === domain);
}

// Confidence ramps linearly with event count, saturating at 10 events.
function confidenceFromCount(count) {
  return Math.min(1, count / 10);
}

// Clamp a score into the [0, 100] range.
function clamp100(n) {
  if (n > 100) return 100;
  if (n < 0) return 0;
  return n;
}

// Informativeness of content: mean normalized content length, where
// 200+ characters counts as fully informative.
var CLARITY_EXTRACTOR = {
  id: "clarity",
  description: "Informativeness of event content \u2014 commit messages, PR bodies, review text",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    let total = 0;
    for (const entry of sub) {
      const len = (entry.event.content ?? "").length;
      total += Math.min(len, 200) / 200 * 100;
    }
    return {
      score: clamp100(total / sub.length),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};

// Accountability: fraction of events whose primary actor is known.
var OWNERSHIP_EXTRACTOR = {
  id: "ownership",
  description: "Clarity of accountability \u2014 fraction of events with a known primary actor",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    let attributed = 0;
    for (const entry of sub) {
      if (entry.event.actor.kind !== "unknown") attributed += 1;
    }
    return {
      score: clamp100(attributed / sub.length * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};

// Follow-through: fraction of this domain's events that some event
// (in any domain) explicitly responded to.
var FOLLOW_THROUGH_EXTRACTOR = {
  id: "follow_through",
  description: "Fraction of events that were followed up \u2014 i.e. referenced by a later event",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    // Collect every event id that appears as a respondsTo target.
    const referencedIds = new Set();
    for (const entry of events) {
      const target = entry.event.respondsTo?.eventId;
      if (target) referencedIds.add(target);
    }
    let followedUp = 0;
    for (const entry of sub) {
      if (referencedIds.has(entry.event.id)) followedUp += 1;
    }
    return {
      score: clamp100(followedUp / sub.length * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};

// Coordination pressure: fraction of events that reference a prior event.
var ALIGNMENT_EXTRACTOR = {
  id: "alignment",
  description: "Coordination pressure \u2014 fraction of events that reference a prior event",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    let referencing = 0;
    for (const entry of sub) {
      if (entry.event.respondsTo !== undefined) referencing += 1;
    }
    return {
      score: clamp100(referencing / sub.length * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};

// Activity rate: events per day in this domain, capped at 10/day.
var DECISION_MOMENTUM_EXTRACTOR = {
  id: "decision_momentum",
  description: "Rate of activity in this domain \u2014 events per day, capped at 10/day",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    const eventCount = sub.length;
    const confidence = confidenceFromCount(eventCount);
    if (eventCount < 2) {
      // A lone event still represents some motion: token non-zero score.
      return { score: 20, eventCount, confidence };
    }
    const times = sub.map((entry) => Date.parse(entry.event.timestamp)).sort((a, b) => a - b);
    const spanMs = times[times.length - 1] - times[0];
    // Never divide by less than one hour of elapsed time.
    const spanDays = Math.max(spanMs / (24 * 60 * 60 * 1e3), 1 / 24);
    const perDay = eventCount / spanDays;
    return {
      score: clamp100(Math.min(perDay, 10) / 10 * 100),
      eventCount,
      confidence
    };
  }
};

// Default extractor set, in render order.
var DEFAULT_SIGNAL_EXTRACTORS = Object.freeze([
  CLARITY_EXTRACTOR,
  OWNERSHIP_EXTRACTOR,
  FOLLOW_THROUGH_EXTRACTOR,
  ALIGNMENT_EXTRACTOR,
  DECISION_MOMENTUM_EXTRACTOR
]);
686
+
687
+ // src/radiant/core/prompt.ts
688
// src/radiant/core/prompt.ts
/**
 * Build the full system prompt for a governed AI call by composing four
 * sections — worldmodel, analytical frame, voice rules, and guardrails —
 * joined by "---" dividers. The template text below is runtime output;
 * do not edit it for style.
 *
 * @param {string} worldmodelContent - raw worldmodel markdown, appended verbatim
 * @param {object} lens - lens record (primary_frame, voice, vocabulary,
 *   strategic_patterns, forbidden_phrases) shaping tone and vocabulary
 * @returns {string} the composed system prompt
 */
function composeSystemPrompt(worldmodelContent, lens) {
  const sections = [];
  // Section 1: the worldmodel itself, with a short grounding preamble.
  sections.push(
    `## Worldmodel

You are operating inside a governed environment. The worldmodel below
defines the invariants, signals, decision priorities, and behavioral
expectations for this organization. Every response you produce must
be grounded in this worldmodel.

` + worldmodelContent
  );
  // Section 2: how to think — rubric, evaluation questions, overlap states.
  const frame = lens.primary_frame;
  const questionsBlock = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
  const overlapsBlock = frame.overlaps.map(
    (o) => `- ${o.domains[0]} + ${o.domains[1]} = **${o.emergent_state}**: ${o.description}`
  ).join("\n");
  sections.push(
    `## How to Think (Analytical Frame: ${lens.name})

${frame.scoring_rubric}

### Evaluation questions to reason through

${questionsBlock}

### Overlap emergent states

${overlapsBlock}

### Center identity

When all dimensions integrate fully: **${frame.center_identity}**. Surface this sparingly \u2014 only when the integration is genuinely complete.`
  );
  // Section 3: how to speak — voice register, rules, vocabulary, patterns.
  const vocabPreferred = Object.entries(lens.vocabulary.preferred).map(([generic, native]) => `- "${generic}" \u2192 **${native}**`).join("\n");
  const vocabArchitecture = lens.vocabulary.architecture.map((t) => `\`${t}\``).join(", ");
  const vocabProperNouns = lens.vocabulary.proper_nouns.map((n) => `**${n}**`).join(", ");
  const strategicBlock = lens.strategic_patterns.map((p) => `- ${p}`).join("\n");
  sections.push(
    `## How to Speak (Voice: ${lens.name})

Register: ${lens.voice.register}

Rules:
- Active voice: ${lens.voice.active_voice}
- Named specificity (people, places, numbers): ${lens.voice.specificity}
- Hype vocabulary: ${lens.voice.hype_vocabulary}
- Hedging / qualified phrasing: ${lens.voice.hedging}
- Playfulness: ${lens.voice.playfulness}
- Close with strategic frame: ${lens.voice.close_with_strategic_frame}
- Honesty about failure: ${lens.voice.honesty_about_failure}

### Output translation discipline

${lens.voice.output_translation}

### Vocabulary

Proper nouns (use literally): ${vocabProperNouns}

Preferred term substitutions:
${vocabPreferred}

Architecture vocabulary: ${vocabArchitecture}

### Strategic decision patterns

When recommending action, these patterns reflect how this organization resolves tradeoffs:

${strategicBlock}`
  );
  // Section 4: guardrails — the lens's forbidden-phrase blacklist.
  const forbiddenBlock = lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
  sections.push(
    `## Guardrails

Do NOT use any of these phrases in your response. If you catch yourself
reaching for one, rephrase in direct, active, specific language instead.

${forbiddenBlock}

If your response would violate a worldmodel invariant, state the conflict
explicitly and propose an alternative that honors the invariant.`
  );
  return sections.join("\n\n---\n\n");
}
773
+
774
+ // src/radiant/core/voice-check.ts
775
// src/radiant/core/voice-check.ts
/**
 * Scan `text` for occurrences of the lens's forbidden phrases.
 *
 * Matching is case-insensitive; occurrences of the same phrase do not
 * overlap (the search resumes after each hit). Every hit is reported with
 * its character offset into the original `text`.
 *
 * @param {{forbidden_phrases: string[]}} lens - lens whose blacklist to apply
 * @param {string} text - candidate output to check
 * @returns {{phrase: string, offset: number}[]} violations sorted by offset
 */
function checkForbiddenPhrases(lens, text) {
  const lower = text.toLowerCase();
  const violations = [];
  for (const phrase of lens.forbidden_phrases) {
    const phraseLower = phrase.toLowerCase();
    // Fix: an empty phrase matches at every position (`indexOf("", pos)`
    // returns `pos`) and previously caused an infinite loop — skip it.
    if (phraseLower.length === 0) continue;
    let pos = 0;
    while (true) {
      const idx = lower.indexOf(phraseLower, pos);
      if (idx === -1) break;
      violations.push({ phrase, offset: idx });
      pos = idx + phraseLower.length;
    }
  }
  violations.sort((a, b) => a.offset - b.offset);
  return violations;
}
791
+
792
+ // src/radiant/core/ai.ts
793
// src/radiant/core/ai.ts
/**
 * Create an AI client backed by the Anthropic Messages API.
 *
 * @param {string} apiKey - Anthropic API key, sent as `x-api-key`
 * @param {string} [model] - model id; defaults to claude-sonnet-4-20250514
 * @param {number} [maxTokens] - response token cap; defaults to 4096
 * @returns {{complete(systemPrompt: string, userQuery: string): Promise<string>}}
 *   client whose `complete` resolves to the concatenated text parts of the
 *   response, or rejects on an HTTP error / empty text content
 */
function createAnthropicAI(apiKey, model = "claude-sonnet-4-20250514", maxTokens = 4096) {
  const endpoint = "https://api.anthropic.com/v1/messages";
  const requestHeaders = {
    "x-api-key": apiKey,
    "anthropic-version": "2023-06-01",
    "content-type": "application/json"
  };
  return {
    async complete(systemPrompt, userQuery) {
      const payload = {
        model,
        max_tokens: maxTokens,
        system: systemPrompt,
        messages: [{ role: "user", content: userQuery }]
      };
      const res = await fetch(endpoint, {
        method: "POST",
        headers: requestHeaders,
        body: JSON.stringify(payload)
      });
      if (!res.ok) {
        const body = await res.text();
        throw new Error(
          `Anthropic API error ${res.status}: ${body.slice(0, 500)}`
        );
      }
      const data = await res.json();
      // Concatenate every text part of the response content.
      const text = data.content?.filter((c) => c.type === "text").map((c) => c.text ?? "").join("");
      if (!text) {
        throw new Error("Anthropic returned no text content");
      }
      return text;
    }
  };
}
825
/**
 * Create an AI client whose `complete` always resolves to a fixed string.
 * Useful for tests and offline runs; any arguments are ignored.
 *
 * @param {string} fixedResponse - the canned response to return
 * @returns {{complete(): Promise<string>}}
 */
function createMockAI(fixedResponse) {
  const complete = async () => fixedResponse;
  return { complete };
}
832
+
833
+ // src/radiant/core/scopes.ts
834
// src/radiant/core/scopes.ts
/**
 * Parse a repository scope string into `{ owner, repo }`.
 *
 * Accepts bare "owner/repo" as well as GitHub URLs: an optional
 * http(s) scheme, a leading "github.com/", a trailing ".git" and a
 * trailing slash are all stripped before splitting.
 *
 * @param {string} scope - "owner/repo" or a GitHub URL
 * @returns {{owner: string, repo: string}}
 * @throws {Error} when no owner/repo pair can be extracted
 */
function parseRepoScope(scope) {
  const cleaned = scope
    .replace(/^https?:\/\//, "")
    .replace(/^github\.com\//, "")
    .replace(/\.git$/, "")
    .replace(/\/$/, "");
  const [owner, repo] = cleaned.split("/");
  if (!owner || !repo) {
    throw new Error(
      `Cannot parse repo scope: "${scope}". Expected "owner/repo" or a GitHub URL.`
    );
  }
  return { owner, repo };
}
844
/**
 * Render a parsed scope back to its canonical "owner/repo" form.
 *
 * @param {{owner: string, repo: string}} scope
 * @returns {string} "owner/repo"
 */
function formatScope(scope) {
  const { owner, repo } = scope;
  return owner + "/" + repo;
}
847
+
848
+ // src/radiant/adapters/github.ts
849
// src/radiant/adapters/github.ts
/**
 * Fetch recent repository activity (commits, pull requests, issue/PR
 * comments) from the GitHub REST API and normalize it into a single,
 * timestamp-sorted event list.
 *
 * @param {{owner: string, repo: string}} scope - target repository
 * @param {string} token - GitHub token, sent as `Authorization: token ...`
 * @param {{windowDays?: number, perPage?: number}} [options]
 *   windowDays: look-back window (default 14); perPage: page size (default 100)
 * @returns {Promise<object[]>} normalized events sorted oldest-first
 */
async function fetchGitHubActivity(scope, token, options = {}) {
  const windowDays = options.windowDays ?? 14;
  const perPage = options.perPage ?? 100;
  // ISO timestamp of the window start, used for the `since` query params.
  const since = new Date(
    Date.now() - windowDays * 24 * 60 * 60 * 1e3
  ).toISOString();
  const base = `https://api.github.com/repos/${formatScope(scope)}`;
  const headers = {
    Authorization: `token ${token}`,
    Accept: "application/vnd.github.v3+json",
    "User-Agent": "neuroverseos-radiant"
  };
  const events = [];
  // The three endpoints are independent — fetch them in parallel.
  const [commits, prs, comments] = await Promise.all([
    fetchJSON(
      `${base}/commits?since=${since}&per_page=${perPage}`,
      headers
    ),
    fetchJSON(
      `${base}/pulls?state=all&sort=updated&direction=desc&per_page=${perPage}`,
      headers
    ),
    fetchJSON(
      `${base}/issues/comments?since=${since}&per_page=${perPage}&sort=updated&direction=desc`,
      headers
    )
  ]);
  for (const c of commits) {
    events.push(mapCommit(c, scope));
  }
  // The pulls endpoint has no `since` filter, so PRs outside the window are
  // dropped here by `updated_at`.
  const sinceDate = new Date(since);
  for (const pr of prs) {
    if (new Date(pr.updated_at) >= sinceDate) {
      events.push(mapPR(pr, scope));
    }
  }
  for (const comment of comments) {
    events.push(mapComment(comment, scope));
  }
  // Oldest-first so downstream span/rate calculations see chronological order.
  events.sort(
    (a, b) => Date.parse(a.timestamp) - Date.parse(b.timestamp)
  );
  return events;
}
893
/**
 * Normalize a GitHub commit payload into a Radiant event.
 * Co-authors are parsed from "Co-authored-by:" trailers in the message;
 * the `coActors` field is omitted (undefined) when there are none.
 *
 * @param {object} c - GitHub commit API payload
 * @param {{owner: string, repo: string}} scope - repository the commit belongs to
 * @returns {object} normalized commit event
 */
function mapCommit(c, scope) {
  const { message, author: gitAuthor } = c.commit;
  const coActors = extractCoAuthors(message);
  const event = {
    id: `commit-${c.sha.slice(0, 8)}`,
    timestamp: gitAuthor.date,
    // `c.author` (the GitHub account) may be null; fall back to the git name.
    actor: mapUser(c.author, gitAuthor.name),
    coActors: coActors.length === 0 ? void 0 : coActors,
    kind: "commit",
    content: message,
    metadata: {
      scope: formatScope(scope),
      sha: c.sha
    }
  };
  return event;
}
909
/**
 * Normalize a GitHub pull-request payload into a Radiant event.
 *
 * kind is "pr_merged" when merged, otherwise "pr_opened"/"pr_closed" by state.
 * When someone other than the author merged the PR, the event is attributed
 * to the merger and links back to the author via `respondsTo`.
 *
 * @param {object} pr - GitHub pull request API payload
 * @param {{owner: string, repo: string}} scope - repository of the PR
 * @returns {object} normalized PR event
 */
function mapPR(pr, scope) {
  const event = {
    id: `pr-${pr.number}`,
    timestamp: pr.created_at,
    actor: mapUser(pr.user),
    kind: pr.merged_at ? "pr_merged" : pr.state === "open" ? "pr_opened" : "pr_closed",
    content: `${pr.title}\n\n${pr.body ?? ""}`.trim(),
    metadata: {
      scope: formatScope(scope),
      pr_number: pr.number,
      state: pr.state,
      merged_at: pr.merged_at
    }
  };
  // Fix: `pr.user` can be null (deleted/"ghost" accounts); the previous
  // `pr.user.login` access crashed here. `mapUser` already handles null.
  if (pr.merged_by && pr.merged_by.login !== pr.user?.login) {
    event.actor = mapUser(pr.merged_by);
    event.kind = "pr_merged";
    event.timestamp = pr.merged_at ?? pr.updated_at;
    event.respondsTo = {
      // NOTE(review): no event with id `pr-N-opened` is generated elsewhere
      // in this adapter — downstream consumers resolve it as a soft link.
      eventId: `pr-${pr.number}-opened`,
      actor: mapUser(pr.user)
    };
  }
  return event;
}
936
/**
 * Normalize a GitHub issue/PR comment payload into a Radiant event.
 * The parent issue number is recovered from `issue_url`; the comment is
 * linked to a `pr-<n>` event id (issues and PRs share comment numbering).
 *
 * @param {object} comment - GitHub comment API payload
 * @param {{owner: string, repo: string}} scope - repository of the comment
 * @returns {object} normalized comment event
 */
function mapComment(comment, scope) {
  const match = /\/issues\/(\d+)$/.exec(comment.issue_url);
  const issueNumber = match ? match[1] : "unknown";
  return {
    id: `comment-${comment.id}`,
    timestamp: comment.created_at,
    actor: mapUser(comment.user),
    kind: "comment",
    content: comment.body,
    respondsTo: {
      eventId: `pr-${issueNumber}`,
      // The parent event's author is not in this payload.
      actor: { id: "unknown", kind: "unknown" }
    },
    metadata: {
      scope: formatScope(scope),
      issue_number: issueNumber
    }
  };
}
956
// GitHub logins (lowercased) always classified as bots by mapUser, even if
// the account's `type` field doesn't say "Bot".
var KNOWN_AI_LOGINS = /* @__PURE__ */ new Set([
  "github-actions[bot]",
  "dependabot[bot]",
  "renovate[bot]",
  "copilot"
]);
// Lowercased co-author names that mark a "Co-authored-by:" trailer as an AI
// actor in extractCoAuthors (matched exactly or as a substring).
var KNOWN_AI_CO_AUTHOR_NAMES = /* @__PURE__ */ new Set([
  "claude",
  "copilot",
  "cursor",
  "codeium",
  "tabnine",
  "codex"
]);
970
/**
 * Map a GitHub user payload to a Radiant actor.
 *
 * A missing user becomes kind "unknown" (id from `fallbackName` when given).
 * Otherwise the actor is a "bot" when GitHub flags the account type as Bot,
 * the login ends in "[bot]", or the login is in KNOWN_AI_LOGINS; else "human".
 *
 * @param {object|null|undefined} ghUser - GitHub user payload (may be null)
 * @param {string} [fallbackName] - name to use when ghUser is absent
 * @returns {{id: string, kind: string, name: string|undefined}}
 */
function mapUser(ghUser, fallbackName) {
  if (!ghUser) {
    return {
      id: fallbackName ?? "unknown",
      kind: "unknown",
      name: fallbackName
    };
  }
  const { login } = ghUser;
  const isBot = ghUser.type === "Bot" || login.endsWith("[bot]") || KNOWN_AI_LOGINS.has(login.toLowerCase());
  return {
    id: login,
    kind: isBot ? "bot" : "human",
    name: login
  };
}
991
/**
 * Parse "Co-authored-by: Name <email>" trailers out of a commit message.
 *
 * A co-author is classified as "ai" when its lowercased name matches (or
 * contains) an entry of KNOWN_AI_CO_AUTHOR_NAMES, otherwise "human". The id
 * is the email when present, else the lowercased name.
 *
 * @param {string} message - full commit message
 * @returns {{id: string, kind: string, name: string}[]} co-author actors
 */
function extractCoAuthors(message) {
  const trailerRe = /^Co-authored-by:\s*(.+?)\s*<([^>]*)>/i;
  const found = [];
  for (const line of message.split("\n")) {
    const m = trailerRe.exec(line);
    if (!m) continue;
    const displayName = m[1].trim();
    const key = displayName.toLowerCase();
    const isAI = KNOWN_AI_CO_AUTHOR_NAMES.has(key) || [...KNOWN_AI_CO_AUTHOR_NAMES].some((ai) => key.includes(ai));
    found.push({
      id: m[2] || key,
      kind: isAI ? "ai" : "human",
      name: displayName
    });
  }
  return found;
}
1010
/**
 * GET a GitHub API URL and return the parsed JSON body.
 *
 * 404 is treated as "no data" and yields an empty array. A 403 whose body
 * mentions the rate limit raises a dedicated error; any other failure raises
 * a generic error carrying the status and a 300-char body excerpt.
 *
 * @param {string} url - fully-built API URL
 * @param {object} headers - request headers (auth, accept, user-agent)
 * @returns {Promise<any>} parsed JSON (an array for list endpoints)
 * @throws {Error} on non-OK responses other than 404
 */
async function fetchJSON(url, headers) {
  const res = await fetch(url, { headers });
  if (res.ok) {
    return await res.json();
  }
  if (res.status === 404) return [];
  // Fix: read the body exactly once. A fetch Response body can only be
  // consumed once — the previous code called res.text() a second time for
  // non-rate-limit 403s, which replaced the intended error with a TypeError.
  const body = await res.text();
  if (res.status === 403 && body.includes("rate limit")) {
    throw new Error(
      `GitHub API rate limit exceeded. Wait or use a token with higher limits.`
    );
  }
  throw new Error(
    `GitHub API error ${res.status} for ${url}: ${body.slice(0, 300)}`
  );
}
1028
/**
 * Build a GitHub adapter stand-in that resolves to a fixed event list,
 * for tests and offline runs. Arguments passed to the adapter are ignored.
 *
 * @param {object[]} fixedEvents - events the adapter will always return
 * @returns {() => Promise<object[]>}
 */
function createMockGitHubAdapter(fixedEvents) {
  return async function mockAdapter() {
    return fixedEvents;
  };
}
1031
+
1032
+ // src/radiant/adapters/exocortex.ts
1033
+ var import_fs = require("fs");
1034
+ var import_path = require("path");
1035
/**
 * Load the "stated intent" files from an exocortex directory.
 *
 * Each field tries one or more candidate paths under `dirPath` and keeps the
 * first file that exists and has non-empty trimmed content; missing or
 * unreadable files yield null. `filesLoaded` counts the successful reads.
 *
 * @param {string} dirPath - exocortex directory (relative paths resolved)
 * @returns {object} context with attention/goals/identity/sprint/organization/
 *   methods (string|null each), `source` (resolved dir) and `filesLoaded`
 */
function readExocortex(dirPath) {
  const dir = (0, import_path.resolve)(dirPath);
  let filesLoaded = 0;
  // Try each candidate path in order; return the first non-empty file's
  // trimmed content, or null. Read errors are deliberately best-effort.
  function tryRead(...paths) {
    for (const p of paths) {
      const full = (0, import_path.join)(dir, p);
      if ((0, import_fs.existsSync)(full)) {
        try {
          const content = (0, import_fs.readFileSync)(full, "utf-8").trim();
          if (content) {
            filesLoaded++;
            return content;
          }
        } catch {
        }
      }
    }
    return null;
  }
  const ctx = {
    attention: tryRead("attention.md"),
    goals: tryRead("goals.md"),
    identity: tryRead("identity.md"),
    sprint: tryRead("sprint.md", "src/sprint.md"),
    organization: tryRead("org/organization.md", "org/src/organization.md"),
    methods: tryRead("org/methods.md", "org/src/methods.md"),
    source: dir,
    filesLoaded
  };
  return ctx;
}
1066
/**
 * Render an exocortex context as the "Stated Intent" prompt section.
 *
 * Emits the fixed preamble followed by one "### <heading>" block per
 * present field, in a fixed order (attention, goals, sprint, identity,
 * organization, methods), joined by blank lines. Returns "" when no
 * exocortex files were loaded.
 *
 * @param {object} ctx - context produced by readExocortex
 * @returns {string} markdown section, or "" when ctx.filesLoaded === 0
 */
function formatExocortexForPrompt(ctx) {
  if (ctx.filesLoaded === 0) return "";
  const parts = [
    "## Stated Intent (from exocortex)\n\nThe following is what the person/team SAYS they are doing, focused on, and working toward. Compare this against the ACTUAL activity from GitHub. Where stated intent and observed behavior diverge, that gap is the most valuable signal in this read. Name it directly."
  ];
  // Field → heading, in presentation order.
  const labeled = [
    ["attention", "Current attention"],
    ["goals", "Goals"],
    ["sprint", "Sprint focus"],
    ["identity", "Identity and values"],
    ["organization", "Organization"],
    ["methods", "Methods"]
  ];
  for (const [field, heading] of labeled) {
    const value = ctx[field];
    if (value) parts.push(`### ${heading}\n\n${value}`);
  }
  return parts.join("\n\n");
}
1104
/**
 * One-line human summary of which exocortex files were loaded,
 * e.g. "attention, goals (2 files)".
 *
 * @param {object} ctx - context produced by readExocortex
 * @returns {string} summary, or "no exocortex files found"
 */
function summarizeExocortex(ctx) {
  if (ctx.filesLoaded === 0) return "no exocortex files found";
  const labels = [
    ["attention", ctx.attention],
    ["goals", ctx.goals],
    ["sprint", ctx.sprint],
    ["identity", ctx.identity],
    ["org", ctx.organization],
    ["methods", ctx.methods]
  ];
  const loaded = labels.filter(([, value]) => value).map(([label]) => label);
  return `${loaded.join(", ")} (${ctx.filesLoaded} files)`;
}
1115
+
1116
+ // src/radiant/core/patterns.ts
1117
// src/radiant/core/patterns.ts
/**
 * Ask the AI to interpret the measured signals/events and parse its reply.
 *
 * @param {object} input - signals, events, worldmodelContent, lens, ai,
 *   optional canonicalPatterns and statedIntent (see buildInterpretationPrompt)
 * @returns {Promise<{patterns: object[], meaning: string, move: string,
 *   raw_ai_response: string}>} parsed read plus the raw model output
 */
async function interpretPatterns(input) {
  const systemPrompt = buildInterpretationPrompt(input);
  const raw = await input.ai.complete(systemPrompt, "Analyze the activity and produce the read.");
  const { patterns, meaning, move } = parseInterpretation(raw, input.canonicalPatterns ?? []);
  return { patterns, meaning, move, raw_ai_response: raw };
}
1128
/**
 * Build the system prompt for the pattern-interpretation call: measured
 * signals, an event sample, reasoning instructions, voice/jargon rules and
 * a strict JSON output schema. The template text is runtime output; do not
 * edit it for style.
 *
 * @param {object} input - signals, events, worldmodelContent, lens,
 *   optional canonicalPatterns (string[]) and statedIntent (string)
 * @returns {string} the full system prompt
 */
function buildInterpretationPrompt(input) {
  const signalSummary = formatSignalSummary(input.signals);
  // Cap the inline event sample at 30 most-recent events.
  const eventSample = formatEventSample(input.events, 30);
  const canonicalList = (input.canonicalPatterns ?? []).length > 0 ? `Patterns the organization has already named (use these names if you see them):
${input.canonicalPatterns.map((p) => `- ${p}`).join("\n")}` : "No patterns have been named yet. Everything you observe is new.";
  const frame = input.lens.primary_frame;
  const evalQuestions = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
  const forbiddenList = input.lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
  const jargonTable = Object.entries(input.lens.vocabulary.jargon_translations).map(([internal, plain]) => `  "${internal}" \u2192 "${plain}"`).join("\n");
  return `You are a behavioral intelligence system reading team activity and producing a read for the reader who needs to act on it.

## Context the reader has loaded

${input.worldmodelContent}

## What happened this window

### Signal matrix (what Radiant measured)

${signalSummary}

### Recent events (sample)

${eventSample}

## How to reason

Reason through these questions INTERNALLY \u2014 do not list them in your output:

${evalQuestions}

Scoring rubric: ${frame.scoring_rubric}

${canonicalList}

${input.statedIntent ? input.statedIntent + "\n" : ""}## Voice: speak like an Auki builder, not like a status report

The reader wants to know **what this means and what to do**, not "what happened." Frame every observation as consequence + implication, not just description.

Wrong voice (status report):
"Rapid deployment of complex technical architecture through composable commits."
"Signal extraction across life, cyber, and joint domains enables consistent behavioral analysis."
"Decision momentum scores suggest architectural delivery without corresponding strategic direction setting."

Right voice (Auki builder):
"Shipping pace is high. The architecture is getting ahead of strategic decisions \u2014 velocity without a declared target."
"Every pattern is new. Nothing is being tracked by name yet. That's fine for now; it becomes a problem when patterns repeat and you still don't have vocabulary for them."
"The work is converging across three modules. The story of HOW they compose isn't being told yet."

The difference: consequence in plain English, not observation in system vocabulary.

## Translate internal jargon to plain English

Readers don't know Radiant's vocabulary. Before ANY description appears in your output, translate these:

${jargonTable}

For example: don't say "update the worldmodel." Say "add a line to your strategy file."

## Health is a valid read

If the activity is healthy and aligned with the worldmodel, SAY SO. Don't fabricate problems. Over-prescription is a voice failure. Legitimate outputs include:

"Nothing's broken. Keep shipping."
"This is what healthy looks like \u2014 the invariants are holding."
"Nothing here needs action."

Only recommend a move when the evidence actually calls for one.

## Output schema \u2014 JSON object

\`\`\`json
{
"patterns": [
{
"name": "pattern_name_snake_case",
"type": "canonical" | "candidate",
"description": "Consequence-framed, plain-English, 1-2 sentences. The reader understands why this matters, not just what you observed.",
"evidence": {
"signals": ["signal_id.domain", ...],
"events": ["event_id", ...],
"cited_invariant": "invariant_name_or_null"
},
"confidence": 0.0 to 1.0
}
],
"meaning": "3-5 sentences. Weave the patterns into ONE strategic thesis. Compress. The reader should finish this paragraph and understand the one thing that matters most in this read. Plain English \u2014 no system jargon.",
"move": "1-3 direct imperatives, OR explicit 'nothing to act on' if the read is healthy. Do not fabricate urgency. Examples: 'Force cross-module ownership this sprint.' / 'Nothing's broken. Keep shipping.' / 'If you want future reads to track this pattern by name, add a line to your strategy file.'"
}
\`\`\`

## Hard rules

- Every signal you cite MUST appear in the signal matrix above
- Every event you cite MUST appear in the events sample above
- Do not invent signals or events that aren't in the data
- Candidate patterns must have type "candidate"
- No hedging, no hype vocabulary
- Apply jargon translation before output
- Health-is-valid \u2014 don't invent problems
- Return ONLY the JSON object, no other text

Do NOT use these phrases anywhere in your output:
${forbiddenList}`;
}
1233
/**
 * Render the signal matrix as plain text grouped by domain.
 *
 * Each signal line is marked with "✓" when it passes the evidence gate
 * (at least 3 events and confidence >= 0.5), otherwise "○". Domains with
 * no signals are omitted.
 *
 * @param {{domain: string, id: string, score: number, eventCount: number,
 *   confidence: number}[]} signals
 * @returns {string} text summary, one "### <domain>" section per domain
 */
function formatSignalSummary(signals) {
  const out = [];
  for (const domain of ["life", "cyber", "joint"]) {
    const domainSignals = signals.filter((s) => s.domain === domain);
    if (domainSignals.length === 0) continue;
    out.push(`### ${domain}`);
    for (const s of domainSignals) {
      const gate = s.eventCount >= 3 && s.confidence >= 0.5 ? "\u2713" : "\u25CB";
      out.push(
        `  ${gate} ${s.id}: score=${s.score.toFixed(1)}, events=${s.eventCount}, conf=${s.confidence.toFixed(2)}`
      );
    }
  }
  return out.join("\n");
}
1249
/**
 * Render the most recent `maxEvents` classified events as a bullet list,
 * one "- [domain] id | actorKind:actorId | kind" line per event, with the
 * first 200 characters of content quoted on a continuation line.
 *
 * @param {{domain: string, event: object}[]} events - classified events,
 *   assumed oldest-first (the tail slice keeps the newest)
 * @param {number} maxEvents - maximum number of events to include
 * @returns {string} formatted sample
 */
function formatEventSample(events, maxEvents) {
  const sample = events.slice(-maxEvents);
  return sample.map((e) => {
    // Truncate content so a single verbose event can't dominate the prompt.
    const content = (e.event.content ?? "").slice(0, 200);
    const respondsTo = e.event.respondsTo ? ` (responds to ${e.event.respondsTo.eventId})` : "";
    return `- [${e.domain}] ${e.event.id} | ${e.event.actor.kind}:${e.event.actor.id} | ${e.event.kind ?? "event"}${respondsTo}
  "${content}"`;
  }).join("\n");
}
1258
/**
 * Parse the model's JSON read out of a raw (possibly noisy) response.
 *
 * First tries to extract a JSON object containing "patterns"; if that fails
 * or yields no patterns, falls back to extracting a bare JSON array. Each
 * pattern-like item is then normalized: name coerced to string, type forced
 * to "canonical" only when declared canonical or matching a known canonical
 * name (case-insensitive), evidence arrays stringified, confidence clamped
 * to [0, 1] with a 0.5 default.
 *
 * @param {string} raw - raw model output
 * @param {string[]} canonicalNames - pattern names already in the worldmodel
 * @returns {{patterns: object[], meaning: string, move: string}}
 */
function parseInterpretation(raw, canonicalNames) {
  let meaning = "";
  let move = "";
  let patternsArray = [];
  // Greedy match: grab the widest brace span mentioning "patterns".
  const objMatch = raw.match(/\{[\s\S]*"patterns"[\s\S]*\}/);
  if (objMatch) {
    try {
      const obj = JSON.parse(objMatch[0]);
      if (Array.isArray(obj.patterns)) {
        patternsArray = obj.patterns;
      }
      if (typeof obj.meaning === "string") meaning = obj.meaning;
      if (typeof obj.move === "string") move = obj.move;
    } catch {
    }
  }
  // Fallback: the model may have returned a bare array of patterns.
  if (patternsArray.length === 0) {
    const arrMatch = raw.match(/\[[\s\S]*\]/);
    if (arrMatch) {
      try {
        const arr = JSON.parse(arrMatch[0]);
        if (Array.isArray(arr)) patternsArray = arr;
      } catch {
      }
    }
  }
  const canonicalSet = new Set(canonicalNames.map((n) => n.toLowerCase()));
  const patterns = [];
  for (const item of patternsArray) {
    if (!isPatternLike(item)) continue;
    const nameStr = String(item.name ?? "unnamed");
    const ev = item.evidence;
    // Canonical either by the model's own declaration or by name match.
    const isCanonical = item.type === "canonical" || canonicalSet.has(nameStr.toLowerCase());
    patterns.push({
      name: nameStr,
      type: isCanonical ? "canonical" : "candidate",
      declaredAs: isCanonical ? nameStr : void 0,
      description: String(item.description ?? ""),
      evidence: {
        signals: Array.isArray(ev?.signals) ? ev.signals.map(String) : [],
        events: Array.isArray(ev?.events) ? ev.events.map(String) : [],
        cited_invariant: ev?.cited_invariant ? String(ev.cited_invariant) : void 0
      },
      confidence: typeof item.confidence === "number" ? Math.max(0, Math.min(1, item.confidence)) : 0.5
    });
  }
  return { patterns, meaning, move };
}
1306
/**
 * Loose type guard: true when `x` is a non-null object carrying a "name"
 * property (own or inherited).
 *
 * @param {unknown} x
 * @returns {boolean}
 */
function isPatternLike(x) {
  if (x === null || typeof x !== "object") return false;
  return "name" in x;
}
1309
+
1310
+ // src/radiant/core/renderer.ts
1311
// src/radiant/core/renderer.ts
/**
 * Render a complete read: the human-readable text body and the YAML
 * frontmatter that carries the machine-readable data.
 *
 * @param {object} input - render input (scope, signals, patterns, scores, ...)
 * @returns {{text: string, frontmatter: string}}
 */
function render(input) {
  return {
    text: renderText(input),
    frontmatter: renderFrontmatter(input)
  };
}
1316
/**
 * Build the human-readable body of a read: header, EMERGENT patterns
 * (canonical then candidates), MEANING, MOVE, ALIGNMENT scores and the
 * DEPTH footer, joined by blank lines.
 *
 * @param {object} input - scope, windowDays, eventCount, lens, patterns,
 *   meaning, move, scores {A_L, A_C, A_N, R}, optional priorReadCount
 * @returns {string} the rendered text
 */
function renderText(input) {
  const sections = [];
  // Header: what was read, over what window, through which lens.
  sections.push(
    `Scope: ${formatScope(input.scope)}
Window: last ${input.windowDays} days \xB7 ${input.eventCount} events
Lens: ${input.lens.name}`
  );
  if (input.patterns.length > 0) {
    const canonical = input.patterns.filter((p) => p.type === "canonical");
    const candidates = input.patterns.filter((p) => p.type === "candidate");
    let emergentBlock = "EMERGENT\n";
    if (canonical.length > 0) {
      for (const p of canonical) {
        emergentBlock += `
  ${p.name}
`;
        emergentBlock += `    ${p.description}
`;
      }
    }
    // Candidates are patterns the worldmodel has not named yet.
    if (candidates.length > 0) {
      emergentBlock += "\n  Emergent (candidates \u2014 not yet in worldmodel)\n";
      for (const p of candidates) {
        emergentBlock += `
  ${p.name} (candidate)
`;
        emergentBlock += `    ${p.description}
`;
        if (p.evidence.cited_invariant) {
          emergentBlock += `    Cited invariant: ${p.evidence.cited_invariant}
`;
        }
      }
    }
    sections.push(emergentBlock.trimEnd());
  }
  if (input.meaning) {
    sections.push(`MEANING

  ${input.meaning.split("\n").join("\n  ")}`);
  }
  if (input.move) {
    sections.push(`MOVE

  ${input.move.split("\n").join("\n  ")}`);
  }
  // ALIGNMENT: the four scores with human labels (A_L/A_C/A_N/R).
  const alignBlock = [
    "ALIGNMENT",
    "",
    `  Human work: ${formatScore(input.scores.A_L)}`,
    `  AI work: ${formatScore(input.scores.A_C)}`,
    `  Human\u2013AI collaboration: ${formatScore(input.scores.A_N)}`,
    `  Composite: ${formatScore(input.scores.R)}`
  ].join("\n");
  sections.push(alignBlock);
  sections.push(renderDepth(input.priorReadCount ?? 0, input.windowDays));
  return sections.join("\n\n");
}
1374
/**
 * Render the DEPTH footer, which tells the reader how much history Radiant
 * has for this scope and what capabilities that unlocks. Three tiers:
 * first read (0 priors), baseline forming (1-3 priors), baseline
 * established (4+ priors).
 *
 * @param {number} priorReads - number of earlier reads of this scope
 * @param {number} windowDays - size of the current activity window
 * @returns {string} the DEPTH section text
 */
function renderDepth(priorReads, windowDays) {
  if (priorReads === 0) {
    return [
      "DEPTH",
      "",
      `  This is your first read. Radiant sees ${windowDays} days of activity`,
      "  but has no prior baseline to compare against.",
      "",
      "  Available now:",
      "  \u2713 Signal extraction across life / cyber / joint domains",
      "  \u2713 Pattern identification (canonical + candidates)",
      "  \u2713 Alignment scoring",
      "",
      "  Available after 2+ reads:",
      "  \xB7 Drift detection (is alignment improving or degrading?)",
      '  \xB7 Baselines (what does "normal" look like for this team?)',
      "  \xB7 Pattern confidence (are these patterns persistent or noise?)",
      "  \xB7 Evolution proposals (should the worldmodel adapt?)",
      "",
      "  Run again next week. The read gets sharper every time."
    ].join("\n");
  }
  if (priorReads < 4) {
    return [
      "DEPTH",
      "",
      `  Read ${priorReads + 1} of this scope. Baseline forming.`,
      "",
      "  Available now:",
      "  \u2713 Signal extraction + pattern identification + alignment scoring",
      `  \u2713 Drift detection (comparing against ${priorReads} prior read${priorReads > 1 ? "s" : ""})`,
      "  \xB7 Baselines stabilizing (need 4+ reads for reliable averages)",
      "  \xB7 Pattern confidence accumulating",
      "",
      "  The read sharpens with each run."
    ].join("\n");
  }
  return [
    "DEPTH",
    "",
    `  Read ${priorReads + 1} of this scope. Baseline established.`,
    "",
    "  Available:",
    "  \u2713 Signal extraction + pattern identification + alignment scoring",
    "  \u2713 Drift detection against established baseline",
    "  \u2713 Pattern confidence (persistent vs noise)",
    "  \u2713 Evolution proposals (candidate patterns with enough history to evaluate)"
  ].join("\n");
}
1423
/**
 * Format one alignment score for display.
 *
 * Unscored sentinels map to explanatory text ("UNAVAILABLE" means no
 * worldmodel was loaded; anything else unscored means insufficient signal).
 * Numeric scores are rounded and labeled by band: >=75 STRONG, >=60 STABLE,
 * >=45 needs attention, >=30 concerning, else critical.
 *
 * @param {number|string} s - numeric score or a sentinel string
 * @returns {string} e.g. "72 · STABLE"
 */
function formatScore(s) {
  if (!isScored(s)) {
    return s === "UNAVAILABLE" ? "not available (no worldmodel loaded)" : "not enough signal to call yet";
  }
  const n = Math.round(s);
  // Bands checked top-down; first floor that `n` reaches wins.
  const bands = [
    [75, "STRONG"],
    [60, "STABLE"],
    [45, "needs attention"],
    [30, "concerning"]
  ];
  let label = "critical";
  for (const [floor, text] of bands) {
    if (n >= floor) {
      label = text;
      break;
    }
  }
  return `${n} \xB7 ${label}`;
}
1437
/**
 * Build the YAML frontmatter block ("---" delimited) that carries the
 * machine-readable data of a read: scope/window/lens metadata, event count,
 * signals grouped by domain, rounded scores (or their sentinel strings) and
 * pattern entries with evidence.
 *
 * @param {object} input - same render input as renderText
 * @returns {string} "---\n<yaml>---"
 */
function renderFrontmatter(input) {
  const now = (/* @__PURE__ */ new Date()).toISOString();
  const signalsByDomain = groupSignalsByDomain(input.signals);
  const patternEntries = input.patterns.map((p) => {
    const entry = {
      name: p.name,
      type: p.type,
      conf: Number(p.confidence.toFixed(2)),
      evidence_signals: p.evidence.signals,
      evidence_events: p.evidence.events
    };
    // Only emitted when present so the YAML stays minimal.
    if (p.evidence.cited_invariant) {
      entry.cited_invariant = p.evidence.cited_invariant;
    }
    return entry;
  });
  const frontmatter = {
    radiant_read: {
      scope: formatScope(input.scope),
      window: `${input.windowDays}d`,
      timestamp: now,
      lens: input.lens.name
    },
    events: {
      total: input.eventCount
    },
    signals: signalsByDomain,
    scores: {
      // Numeric scores are rounded; sentinel values are stringified as-is.
      A_L: isScored(input.scores.A_L) ? Math.round(input.scores.A_L) : String(input.scores.A_L),
      A_C: isScored(input.scores.A_C) ? Math.round(input.scores.A_C) : String(input.scores.A_C),
      A_N: isScored(input.scores.A_N) ? Math.round(input.scores.A_N) : String(input.scores.A_N),
      R: isScored(input.scores.R) ? Math.round(input.scores.R) : String(input.scores.R)
    },
    patterns: patternEntries
  };
  return "---\n" + serializeYAML(frontmatter) + "---";
}
1474
/**
 * Group signals by domain for the frontmatter: a nested record of
 * domain → signal id → { score (1dp), n (event count), conf (2dp) }.
 *
 * @param {{domain: string, id: string, score: number, eventCount: number,
 *   confidence: number}[]} signals
 * @returns {Object<string, Object<string, {score: number, n: number, conf: number}>>}
 */
function groupSignalsByDomain(signals) {
  const grouped = {};
  for (const s of signals) {
    const bucket = grouped[s.domain] ?? (grouped[s.domain] = {});
    bucket[s.id] = {
      score: Number(s.score.toFixed(1)),
      n: s.eventCount,
      conf: Number(s.confidence.toFixed(2))
    };
  }
  return grouped;
}
1486
/**
 * Minimal recursive YAML serializer for the frontmatter.
 *
 * Scalars become "value\n" (strings JSON-quoted). All-scalar arrays become
 * flow style "[a, b]\n". Mixed arrays and objects become block style, each
 * nesting level indented by two spaces; a nested value starts with "\n" so
 * it continues on the following line.
 *
 * @param {*} obj - value to serialize
 * @param {number} [indent] - current indent level (two spaces per level)
 * @returns {string} YAML fragment, always newline-terminated
 */
function serializeYAML(obj, indent = 0) {
  const pad = "  ".repeat(indent);
  // Scalars.
  if (obj === null || obj === void 0) return "null\n";
  if (typeof obj === "string") return JSON.stringify(obj) + "\n";
  if (typeof obj === "number" || typeof obj === "boolean") return String(obj) + "\n";
  if (Array.isArray(obj)) {
    if (obj.length === 0) return "[]\n";
    const allScalar = obj.every((item) => typeof item === "string" || typeof item === "number");
    if (allScalar) {
      // Flow style for simple lists.
      return "[" + obj.map((item) => JSON.stringify(item)).join(", ") + "]\n";
    }
    let out = "\n";
    for (const item of obj) {
      const isPlainObject = typeof item === "object" && item !== null && !Array.isArray(item);
      if (isPlainObject) {
        // First entry rides on the "- " line; the rest align under it.
        const entries = Object.entries(item);
        const [firstKey, firstValue] = entries[0];
        out += pad + "- " + firstKey + ": " + serializeYAML(firstValue, 0).trim() + "\n";
        for (const [key, value] of entries.slice(1)) {
          out += pad + "  " + key + ": " + serializeYAML(value, indent + 2).trim() + "\n";
        }
      } else {
        out += pad + "- " + serializeYAML(item, indent + 1).trim() + "\n";
      }
    }
    return out;
  }
  if (typeof obj === "object") {
    const entries = Object.entries(obj);
    if (entries.length === 0) return "{}\n";
    let out = "\n";
    for (const [key, value] of entries) {
      if (typeof value === "object" && value !== null) {
        // Nested value already starts with "\n", so no space after ":".
        out += pad + key + ":" + serializeYAML(value, indent + 1);
      } else {
        out += pad + key + ": " + serializeYAML(value, indent).trim() + "\n";
      }
    }
    return out;
  }
  // Fallback for anything else (e.g. bigint).
  return `${obj}\n`;
}
1533
+
1534
+ // src/radiant/commands/think.ts
1535
// src/radiant/commands/think.ts
/**
 * Answer a query through the governed system prompt (worldmodel + lens),
 * then voice-check the response against the lens's forbidden phrases.
 *
 * @param {object} input - lensId, worldmodelContent, ai, query
 * @returns {Promise<{response: string, lens: string, voiceViolations: object[],
 *   voiceClean: boolean, systemPrompt: string}>}
 */
async function think(input) {
  const lens = resolveLens(input.lensId);
  const systemPrompt = composeSystemPrompt(input.worldmodelContent, lens);
  const response = await input.ai.complete(systemPrompt, input.query);
  const violations = checkForbiddenPhrases(lens, response);
  const result = {
    response,
    lens: lens.name,
    voiceViolations: violations,
    voiceClean: violations.length === 0,
    systemPrompt
  };
  return result;
}
1548
/**
 * Look up a lens by id, throwing a descriptive error when it is not
 * registered.
 *
 * @param {string} id - lens identifier
 * @returns {object} the registered lens
 * @throws {Error} when no lens with that id exists
 */
function resolveLens(id) {
  const lens = getLens(id);
  if (!lens) {
    // (Removed dead code: an `available` list built from Object.keys({})
    // was computed here and never used.)
    throw new Error(
      `Lens "${id}" not found. Check the id or register the lens in src/radiant/lenses/index.ts.`
    );
  }
  return lens;
}
1564
+
1565
+ // src/radiant/commands/emergent.ts
1566
// src/radiant/commands/emergent.ts
/**
 * Run a full read end-to-end: optionally load exocortex "stated intent",
 * fetch GitHub activity, classify events, extract signals, compute scores,
 * have the AI interpret patterns, rewrite them through the lens, voice-check
 * the result and render text + frontmatter.
 *
 * @param {object} input - lensId, scope, githubToken, worldmodelContent, ai,
 *   optional windowDays (default 14), exocortexPath, canonicalPatterns
 * @returns {Promise<object>} text, frontmatter, voiceViolations, voiceClean,
 *   signals, scores, eventCount
 */
async function emergent(input) {
  const lens = resolveLens2(input.lensId);
  const windowDays = input.windowDays ?? 14;
  let statedIntent;
  let exocortexContext;
  // Stated intent is optional; it's only passed to the AI when exocortex
  // files actually produced content.
  if (input.exocortexPath) {
    exocortexContext = readExocortex(input.exocortexPath);
    const formatted = formatExocortexForPrompt(exocortexContext);
    if (formatted) statedIntent = formatted;
  }
  const events = await fetchGitHubActivity(input.scope, input.githubToken, {
    windowDays
  });
  const classified = classifyEvents(events);
  const signals = extractSignals(classified);
  // An empty worldmodel string means "no worldmodel loaded" for scoring.
  const scores = computeScores(signals, input.worldmodelContent !== "");
  const { patterns, meaning, move } = await interpretPatterns({
    signals,
    events: classified,
    worldmodelContent: input.worldmodelContent,
    lens,
    ai: input.ai,
    canonicalPatterns: input.canonicalPatterns,
    statedIntent
  });
  // Lens rewrite applies the voice rules; then verify the rewritten
  // descriptions against the same lens's forbidden phrases.
  const rewrittenPatterns = patterns.map((p) => lens.rewrite(p));
  const allDescriptions = rewrittenPatterns.map((p) => p.description).join("\n");
  const voiceViolations = checkForbiddenPhrases(lens, allDescriptions);
  const rendered = render({
    scope: input.scope,
    windowDays,
    eventCount: events.length,
    signals,
    patterns: rewrittenPatterns,
    scores,
    lens,
    meaning: meaning || void 0,
    move: move || void 0
  });
  return {
    text: rendered.text,
    frontmatter: rendered.frontmatter,
    voiceViolations,
    voiceClean: voiceViolations.length === 0,
    signals,
    scores,
    eventCount: events.length
  };
}
1615
/**
 * Compute the headline scores from the extracted signals.
 * Life (A_L) and Cyber (A_C) are scored from their domain's signals; the
 * NeuroVerse score (A_N) comes from "joint" signals plus whether a
 * worldmodel was loaded; R is the composite of the three.
 *
 * @param {Array<object>} signals - Extracted signals, each tagged with a domain.
 * @param {boolean} worldmodelLoaded - Whether worldmodel content was supplied.
 * @returns {{A_L: *, A_C: *, A_N: *, R: *}} Individual and composite scores.
 */
function computeScores(signals, worldmodelLoaded) {
  const gate = DEFAULT_EVIDENCE_GATE;
  const inDomain = (domain) => signals.filter((s) => s.domain === domain);
  const A_L = scoreLife(
    { dimensions: inDomain("life").map(signalToDimension) },
    gate
  );
  const A_C = scoreCyber(
    { dimensions: inDomain("cyber").map(signalToDimension) },
    gate
  );
  const A_N = scoreNeuroVerse(
    inDomain("joint").map(signalToBridging),
    worldmodelLoaded,
    gate
  );
  const R = scoreComposite(A_L, A_C, A_N);
  return { A_L, A_C, A_N, R };
}
1636
/**
 * Project a signal onto the generic dimension shape consumed by the
 * life/cyber scorers: only id, score, eventCount, and confidence survive.
 *
 * @param {object} s - Signal with id, score, eventCount, confidence.
 * @returns {{id: *, score: *, eventCount: *, confidence: *}}
 */
function signalToDimension(s) {
  const { id, score, eventCount, confidence } = s;
  return { id, score, eventCount, confidence };
}
1644
/**
 * Project a joint-domain signal onto the bridging shape consumed by the
 * NeuroVerse scorer. Joint signals are proxied as the ALIGN component.
 *
 * @param {object} s - Signal with score, eventCount, confidence.
 * @returns {{component: string, score: *, eventCount: *, confidence: *}}
 */
function signalToBridging(s) {
  const { score, eventCount, confidence } = s;
  return { component: "ALIGN", score, eventCount, confidence };
}
1653
/**
 * Resolve a lens id to its lens object, throwing a descriptive error for
 * unknown ids.
 *
 * @param {string} id - Lens identifier.
 * @returns {object} The matching lens.
 * @throws {Error} When no lens is registered under `id`.
 */
function resolveLens2(id) {
  const found = getLens(id);
  if (found) return found;
  throw new Error(
    `Lens "${id}" not found. Check the id or register the lens.`
  );
}
1662
+
1663
// src/radiant/index.ts
// NOTE(review): version string is "0.0.0" while the published package is
// 0.6.x — presumably a build-time placeholder that was never substituted;
// confirm against the release pipeline.
var RADIANT_PACKAGE_VERSION = "0.0.0";
// Annotate the CommonJS export names for ESM import in node:
// The `0 && (...)` expression below is dead at runtime; the bundler emits
// it so Node's cjs-module-lexer can statically detect the named exports
// when this CJS file is imported from ESM. Do not remove or restructure it.
0 && (module.exports = {
  DEFAULT_EVIDENCE_GATE,
  DEFAULT_SIGNAL_EXTRACTORS,
  LENSES,
  RADIANT_PACKAGE_VERSION,
  aukiBuilderLens,
  checkForbiddenPhrases,
  classifyActorDomain,
  classifyEvents,
  composeSystemPrompt,
  createAnthropicAI,
  createMockAI,
  createMockGitHubAdapter,
  emergent,
  extractSignals,
  fetchGitHubActivity,
  formatExocortexForPrompt,
  formatScope,
  getLens,
  interpretPatterns,
  isPresent,
  isScored,
  isSentinel,
  listLenses,
  parseRepoScope,
  presenceAverage,
  readExocortex,
  render,
  scoreComposite,
  scoreCyber,
  scoreLife,
  scoreNeuroVerse,
  summarizeExocortex,
  think
});