@neuroverseos/governance 0.6.0 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1208 @@
1
+ import {
2
+ getLens
3
+ } from "./chunk-VGFDMPVB.js";
4
+
5
+ // src/radiant/core/prompt.ts
6
/**
 * Assemble the full system prompt for a governed AI session.
 *
 * Produces four markdown sections joined by "---" dividers:
 *   1. Worldmodel   — fixed preamble + the caller-supplied worldmodel text, verbatim.
 *   2. How to Think — the lens's analytical frame (rubric, evaluation questions,
 *                     overlap emergent states, center identity).
 *   3. How to Speak — the lens's voice rules, vocabulary tables, strategic patterns.
 *   4. Guardrails   — forbidden phrases and the invariant-conflict rule.
 *
 * @param {string} worldmodelContent - Raw worldmodel markdown, appended as-is.
 * @param {object} lens - Lens record; must expose name, primary_frame,
 *   vocabulary, voice, strategic_patterns and forbidden_phrases.
 * @returns {string} The composed prompt.
 */
function composeSystemPrompt(worldmodelContent, lens) {
  const sections = [];
  // Section 1: worldmodel, concatenated after a fixed framing preamble.
  sections.push(
    `## Worldmodel

You are operating inside a governed environment. The worldmodel below
defines the invariants, signals, decision priorities, and behavioral
expectations for this organization. Every response you produce must
be grounded in this worldmodel.

` + worldmodelContent
  );
  const frame = lens.primary_frame;
  // Numbered list of the frame's evaluation questions.
  const questionsBlock = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
  // Bulleted list: "- domainA + domainB = **state**: description".
  const overlapsBlock = frame.overlaps.map(
    (o) => `- ${o.domains[0]} + ${o.domains[1]} = **${o.emergent_state}**: ${o.description}`
  ).join("\n");
  // Section 2: analytical frame.
  sections.push(
    `## How to Think (Analytical Frame: ${lens.name})

${frame.scoring_rubric}

### Evaluation questions to reason through

${questionsBlock}

### Overlap emergent states

${overlapsBlock}

### Center identity

When all dimensions integrate fully: **${frame.center_identity}**. Surface this sparingly \u2014 only when the integration is genuinely complete.`
  );
  // Vocabulary tables: generic → preferred term, backticked architecture
  // terms, bolded proper nouns, and strategic tradeoff patterns.
  const vocabPreferred = Object.entries(lens.vocabulary.preferred).map(([generic, native]) => `- "${generic}" \u2192 **${native}**`).join("\n");
  const vocabArchitecture = lens.vocabulary.architecture.map((t) => `\`${t}\``).join(", ");
  const vocabProperNouns = lens.vocabulary.proper_nouns.map((n) => `**${n}**`).join(", ");
  const strategicBlock = lens.strategic_patterns.map((p) => `- ${p}`).join("\n");
  // Section 3: voice and vocabulary.
  sections.push(
    `## How to Speak (Voice: ${lens.name})

Register: ${lens.voice.register}

Rules:
- Active voice: ${lens.voice.active_voice}
- Named specificity (people, places, numbers): ${lens.voice.specificity}
- Hype vocabulary: ${lens.voice.hype_vocabulary}
- Hedging / qualified phrasing: ${lens.voice.hedging}
- Playfulness: ${lens.voice.playfulness}
- Close with strategic frame: ${lens.voice.close_with_strategic_frame}
- Honesty about failure: ${lens.voice.honesty_about_failure}

### Output translation discipline

${lens.voice.output_translation}

### Vocabulary

Proper nouns (use literally): ${vocabProperNouns}

Preferred term substitutions:
${vocabPreferred}

Architecture vocabulary: ${vocabArchitecture}

### Strategic decision patterns

When recommending action, these patterns reflect how this organization resolves tradeoffs:

${strategicBlock}`
  );
  const forbiddenBlock = lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
  // Section 4: guardrails.
  sections.push(
    `## Guardrails

Do NOT use any of these phrases in your response. If you catch yourself
reaching for one, rephrase in direct, active, specific language instead.

${forbiddenBlock}

If your response would violate a worldmodel invariant, state the conflict
explicitly and propose an alternative that honors the invariant.`
  );
  return sections.join("\n\n---\n\n");
}
91
+
92
+ // src/radiant/core/voice-check.ts
93
/**
 * Find every occurrence of a lens's forbidden phrases in `text`.
 *
 * Matching is case-insensitive; occurrences are non-overlapping per phrase
 * (the scan resumes after each hit). Results are sorted by offset.
 *
 * Fix: an empty forbidden phrase previously caused an infinite loop —
 * `indexOf("", pos)` always returns `pos`, so the cursor never advanced.
 * Empty phrases are now skipped.
 *
 * @param {{forbidden_phrases: string[]}} lens - Lens supplying the phrase list.
 * @param {string} text - Text to scan (e.g. an AI response).
 * @returns {Array<{phrase: string, offset: number}>} Violations, offset-sorted.
 */
function checkForbiddenPhrases(lens, text) {
  const haystack = text.toLowerCase();
  const violations = [];
  for (const phrase of lens.forbidden_phrases) {
    const needle = phrase.toLowerCase();
    if (needle.length === 0) continue; // empty phrase would loop forever
    let cursor = haystack.indexOf(needle);
    while (cursor !== -1) {
      violations.push({ phrase, offset: cursor });
      cursor = haystack.indexOf(needle, cursor + needle.length);
    }
  }
  violations.sort((a, b) => a.offset - b.offset);
  return violations;
}
109
+
110
+ // src/radiant/commands/think.ts
111
/**
 * Run one governed "think" pass: resolve the lens, compose the system prompt,
 * ask the AI, then check the response against the lens's forbidden phrases.
 *
 * @param {object} input - Carries lensId, worldmodelContent, query and an
 *   `ai` adapter exposing `complete(systemPrompt, query)`.
 * @returns {Promise<object>} Response text, lens name, voice violations,
 *   a voiceClean flag, and the exact system prompt used.
 */
async function think(input) {
  const lens = resolveLens(input.lensId);
  const systemPrompt = composeSystemPrompt(input.worldmodelContent, lens);
  const response = await input.ai.complete(systemPrompt, input.query);
  const violations = checkForbiddenPhrases(lens, response);
  return {
    response,
    lens: lens.name,
    voiceViolations: violations,
    voiceClean: violations.length === 0,
    systemPrompt
  };
}
124
/**
 * Look up a lens by id, throwing a descriptive error when it is unknown.
 *
 * Fix: removed a dead `Object.keys({})` computation whose result (`available`)
 * was never used; its comments admitted it produced nothing useful.
 *
 * @param {string} id - Lens identifier as registered in the lenses index.
 * @returns {object} The lens record.
 * @throws {Error} When `getLens(id)` returns a falsy value.
 */
function resolveLens(id) {
  const lens = getLens(id);
  if (!lens) {
    throw new Error(
      `Lens "${id}" not found. Check the id or register the lens in src/radiant/lenses/index.ts.`
    );
  }
  return lens;
}
140
+
141
+ // src/radiant/core/scopes.ts
142
/**
 * Parse a repo scope string into { owner, repo }.
 *
 * Accepts "owner/repo", GitHub web URLs (with or without scheme, ".git"
 * suffix, trailing slash), and — new — SSH remotes of the form
 * "git@github.com:owner/repo.git". Existing inputs parse exactly as before.
 *
 * @param {string} scope - Scope string in any accepted form.
 * @returns {{owner: string, repo: string}}
 * @throws {Error} When owner or repo cannot be extracted.
 */
function parseRepoScope(scope) {
  const cleaned = scope
    .replace(/^git@github\.com:/, "") // SSH remote form
    .replace(/^https?:\/\//, "")
    .replace(/^github\.com\//, "")
    .replace(/\.git$/, "")
    .replace(/\/$/, "");
  const parts = cleaned.split("/");
  if (parts.length < 2 || !parts[0] || !parts[1]) {
    throw new Error(
      `Cannot parse repo scope: "${scope}". Expected "owner/repo" or a GitHub URL.`
    );
  }
  return { owner: parts[0], repo: parts[1] };
}
152
/**
 * Render a parsed scope back to its canonical "owner/repo" form.
 *
 * @param {{owner: string, repo: string}} scope
 * @returns {string}
 */
function formatScope(scope) {
  const { owner, repo } = scope;
  return [owner, repo].join("/");
}
155
+
156
+ // src/radiant/adapters/github.ts
157
/**
 * Pull recent activity for one repo from the GitHub REST API and normalize it
 * into Radiant events.
 *
 * Fetches commits, pull requests and issue comments in parallel, maps each
 * into the common event shape, and returns them sorted oldest-first.
 * Only the first page of each endpoint is read (up to `perPage` items).
 *
 * @param {{owner: string, repo: string}} scope - Parsed repo scope.
 * @param {string} token - GitHub token, sent as `Authorization: token …`.
 * @param {{windowDays?: number, perPage?: number}} [options] - Window length
 *   (default 14 days) and page size (default 100).
 * @returns {Promise<Array<object>>} Normalized events, ascending by timestamp.
 */
async function fetchGitHubActivity(scope, token, options = {}) {
  const windowDays = options.windowDays ?? 14;
  const perPage = options.perPage ?? 100;
  // Lower bound of the window, ISO-formatted for the `since` query params.
  const since = new Date(
    Date.now() - windowDays * 24 * 60 * 60 * 1e3
  ).toISOString();
  const base = `https://api.github.com/repos/${formatScope(scope)}`;
  const headers = {
    Authorization: `token ${token}`,
    Accept: "application/vnd.github.v3+json",
    "User-Agent": "neuroverseos-radiant"
  };
  const events = [];
  // The three endpoints are independent — fetch them in parallel.
  const [commits, prs, comments] = await Promise.all([
    fetchJSON(
      `${base}/commits?since=${since}&per_page=${perPage}`,
      headers
    ),
    fetchJSON(
      `${base}/pulls?state=all&sort=updated&direction=desc&per_page=${perPage}`,
      headers
    ),
    fetchJSON(
      `${base}/issues/comments?since=${since}&per_page=${perPage}&sort=updated&direction=desc`,
      headers
    )
  ]);
  for (const c of commits) {
    events.push(mapCommit(c, scope));
  }
  // The pulls endpoint has no `since` filter, so drop PRs last touched
  // before the window here.
  const sinceDate = new Date(since);
  for (const pr of prs) {
    if (new Date(pr.updated_at) >= sinceDate) {
      events.push(mapPR(pr, scope));
    }
  }
  for (const comment of comments) {
    events.push(mapComment(comment, scope));
  }
  // Chronological order, oldest first.
  events.sort(
    (a, b) => Date.parse(a.timestamp) - Date.parse(b.timestamp)
  );
  return events;
}
201
/**
 * Normalize one GitHub commit object into a Radiant event.
 *
 * The primary actor comes from the GitHub account when present, falling back
 * to the raw git author name; Co-authored-by trailers become coActors.
 *
 * @param {object} c - Commit payload from the GitHub commits endpoint.
 * @param {{owner: string, repo: string}} scope - Repo the commit belongs to.
 * @returns {object} Normalized commit event.
 */
function mapCommit(c, scope) {
  const primary = mapUser(c.author, c.commit.author.name);
  const coAuthors = extractCoAuthors(c.commit.message);
  return {
    id: `commit-${c.sha.slice(0, 8)}`,
    timestamp: c.commit.author.date,
    actor: primary,
    // Omit (leave undefined) when the commit has no co-author trailers.
    coActors: coAuthors.length > 0 ? coAuthors : undefined,
    kind: "commit",
    content: c.commit.message,
    metadata: {
      scope: formatScope(scope),
      sha: c.sha
    }
  };
}
217
/**
 * Normalize one GitHub pull request into a Radiant event.
 *
 * The event starts attributed to the PR author at created_at; `kind` reflects
 * the current state (merged > open > closed). When somebody other than the
 * author merged the PR, the event is re-attributed to the merger and the
 * author is recorded in `respondsTo`.
 *
 * Fix: `pr.user` can be null for deleted accounts — the foreign-merge check
 * previously dereferenced `pr.user.login` directly and crashed; it now uses
 * optional chaining (mapUser already tolerates a null user).
 *
 * @param {object} pr - PR payload from the GitHub pulls endpoint.
 * @param {{owner: string, repo: string}} scope - Repo the PR belongs to.
 * @returns {object} Normalized PR event.
 */
function mapPR(pr, scope) {
  const event = {
    id: `pr-${pr.number}`,
    timestamp: pr.created_at,
    actor: mapUser(pr.user),
    kind: pr.merged_at ? "pr_merged" : pr.state === "open" ? "pr_opened" : "pr_closed",
    content: `${pr.title}

${pr.body ?? ""}`.trim(),
    metadata: {
      scope: formatScope(scope),
      pr_number: pr.number,
      state: pr.state,
      merged_at: pr.merged_at
    }
  };
  // Foreign merge: re-attribute to the merger, point respondsTo at the author.
  if (pr.merged_by && pr.merged_by.login !== pr.user?.login) {
    event.actor = mapUser(pr.merged_by);
    event.kind = "pr_merged";
    event.timestamp = pr.merged_at ?? pr.updated_at;
    event.respondsTo = {
      // NOTE(review): no generated event uses a "pr-N-opened" id, so
      // follow-through matching never links this reference — confirm whether
      // the suffix is intentional before changing it.
      eventId: `pr-${pr.number}-opened`,
      actor: mapUser(pr.user)
    };
  }
  return event;
}
244
/**
 * Normalize one GitHub issue/PR comment into a Radiant event.
 *
 * The parent issue number is parsed from `issue_url`; the comment is linked
 * to the corresponding `pr-<n>` event via `respondsTo` with an unknown actor
 * (the parent author is not part of the comments payload).
 *
 * @param {object} comment - Comment payload from the issues/comments endpoint.
 * @param {{owner: string, repo: string}} scope - Repo the comment belongs to.
 * @returns {object} Normalized comment event.
 */
function mapComment(comment, scope) {
  const m = /\/issues\/(\d+)$/.exec(comment.issue_url);
  const issueNumber = m ? m[1] : "unknown";
  return {
    id: `comment-${comment.id}`,
    timestamp: comment.created_at,
    actor: mapUser(comment.user),
    kind: "comment",
    content: comment.body,
    respondsTo: {
      eventId: `pr-${issueNumber}`,
      actor: { id: "unknown", kind: "unknown" }
    },
    metadata: {
      scope: formatScope(scope),
      issue_number: issueNumber
    }
  };
}
264
// GitHub logins that are automation rather than people; compared
// case-insensitively against user.login in mapUser.
var KNOWN_AI_LOGINS = /* @__PURE__ */ new Set([
  "github-actions[bot]",
  "dependabot[bot]",
  "renovate[bot]",
  "copilot"
]);
// Lowercased AI tool names matched (exactly or as a substring) against the
// trimmed "Co-authored-by:" trailer name in extractCoAuthors.
var KNOWN_AI_CO_AUTHOR_NAMES = /* @__PURE__ */ new Set([
  "claude",
  "copilot",
  "cursor",
  "codeium",
  "tabnine",
  "codex"
]);
278
/**
 * Convert a GitHub user payload into a Radiant actor.
 *
 * A missing user (e.g. commits with no linked account) yields an "unknown"
 * actor carrying the fallback name. Bots are detected via the API `type`,
 * the "[bot]" login suffix, or membership in KNOWN_AI_LOGINS.
 *
 * @param {object|null|undefined} ghUser - GitHub user object, if any.
 * @param {string} [fallbackName] - Name to use when there is no account.
 * @returns {{id: string, kind: string, name: string|undefined}}
 */
function mapUser(ghUser, fallbackName) {
  if (!ghUser) {
    return {
      id: fallbackName ?? "unknown",
      kind: "unknown",
      name: fallbackName
    };
  }
  const isBot =
    ghUser.type === "Bot" ||
    ghUser.login.endsWith("[bot]") ||
    KNOWN_AI_LOGINS.has(ghUser.login.toLowerCase());
  return {
    id: ghUser.login,
    kind: isBot ? "bot" : "human",
    name: ghUser.login
  };
}
299
/**
 * Extract "Co-authored-by: Name <email>" trailers from a commit message.
 *
 * Each trailer becomes an actor whose id is the email (or the lowercased name
 * when the email is empty). Known AI tool names — exact or substring match —
 * are tagged kind "ai", everything else "human".
 *
 * @param {string} message - Full commit message.
 * @returns {Array<{id: string, kind: string, name: string}>}
 */
function extractCoAuthors(message) {
  const trailer = /^Co-authored-by:\s*(.+?)\s*<([^>]*)>/i; // stateless (no /g)
  const coAuthors = [];
  for (const line of message.split("\n")) {
    const m = trailer.exec(line);
    if (!m) continue;
    const displayName = m[1].trim();
    const lowered = displayName.toLowerCase();
    const isAI =
      KNOWN_AI_CO_AUTHOR_NAMES.has(lowered) ||
      [...KNOWN_AI_CO_AUTHOR_NAMES].some((ai) => lowered.includes(ai));
    coAuthors.push({
      id: m[2] || lowered,
      kind: isAI ? "ai" : "human",
      name: displayName
    });
  }
  return coAuthors;
}
318
/**
 * Fetch a GitHub API URL and return the parsed JSON body.
 *
 * 404 responses are treated as "no data" and yield an empty array; a 403
 * whose body mentions the rate limit raises a dedicated error; every other
 * failure raises an error carrying the status and the first 300 body chars.
 *
 * Fix: the body is now read exactly once. Previously a 403 whose body did
 * NOT contain "rate limit" fell through to a second `res.text()` call, which
 * throws "body already read" and masked the real API error.
 *
 * @param {string} url - Fully-formed API URL.
 * @param {object} headers - Request headers (auth, accept, user-agent).
 * @returns {Promise<any>} Parsed JSON, or [] for a 404.
 * @throws {Error} On any non-OK response other than 404.
 */
async function fetchJSON(url, headers) {
  const res = await fetch(url, { headers });
  if (res.ok) {
    return await res.json();
  }
  if (res.status === 404) return [];
  const body = await res.text(); // read once — a stream can't be re-read
  if (res.status === 403 && body.includes("rate limit")) {
    throw new Error(
      `GitHub API rate limit exceeded. Wait or use a token with higher limits.`
    );
  }
  throw new Error(
    `GitHub API error ${res.status} for ${url}: ${body.slice(0, 300)}`
  );
}
336
/**
 * Build a drop-in replacement for fetchGitHubActivity that always resolves
 * with a fixed event list — used for tests and offline runs.
 *
 * @param {Array<object>} fixedEvents - Events the adapter will return.
 * @returns {() => Promise<Array<object>>}
 */
function createMockGitHubAdapter(fixedEvents) {
  return function mockAdapter() {
    return Promise.resolve(fixedEvents);
  };
}
339
+
340
+ // src/radiant/adapters/exocortex.ts
341
+ import { readFileSync, existsSync } from "fs";
342
+ import { join, resolve } from "path";
343
/**
 * Load the exocortex context files from a directory.
 *
 * Each field tries one or more candidate paths in order and keeps the first
 * file that exists and has non-empty (trimmed) content; unreadable files are
 * skipped silently. `filesLoaded` counts how many files were actually read.
 *
 * @param {string} dirPath - Exocortex root (relative paths are resolved).
 * @returns {object} Context: attention/goals/identity/sprint/organization/
 *   methods (string or null each), plus source dir and filesLoaded count.
 */
function readExocortex(dirPath) {
  const dir = resolve(dirPath);
  let filesLoaded = 0;
  const tryRead = (...candidates) => {
    for (const rel of candidates) {
      const abs = join(dir, rel);
      if (!existsSync(abs)) continue;
      try {
        const text = readFileSync(abs, "utf-8").trim();
        if (text) {
          filesLoaded++;
          return text;
        }
      } catch {
        // unreadable candidate — fall through to the next one
      }
    }
    return null;
  };
  // Property order matters only for readability; filesLoaded is evaluated
  // last, after every tryRead call above it has run.
  return {
    attention: tryRead("attention.md"),
    goals: tryRead("goals.md"),
    identity: tryRead("identity.md"),
    sprint: tryRead("sprint.md", "src/sprint.md"),
    organization: tryRead("org/organization.md", "org/src/organization.md"),
    methods: tryRead("org/methods.md", "org/src/methods.md"),
    source: dir,
    filesLoaded
  };
}
374
/**
 * Render the exocortex context as a markdown "Stated Intent" prompt section.
 *
 * Returns "" when no files were loaded. Otherwise emits a fixed preamble
 * followed by one "### <heading>" subsection per non-empty field, in a fixed
 * order, joined by blank lines.
 *
 * @param {object} ctx - Context object from readExocortex.
 * @returns {string} Markdown block, or "" when the exocortex is empty.
 */
function formatExocortexForPrompt(ctx) {
  if (ctx.filesLoaded === 0) return "";
  const sections = [
    "## Stated Intent (from exocortex)\n\nThe following is what the person/team SAYS they are doing, focused on, and working toward. Compare this against the ACTUAL activity from GitHub. Where stated intent and observed behavior diverge, that gap is the most valuable signal in this read. Name it directly."
  ];
  // Fixed heading order — must match the original output exactly.
  const parts = [
    ["Current attention", ctx.attention],
    ["Goals", ctx.goals],
    ["Sprint focus", ctx.sprint],
    ["Identity and values", ctx.identity],
    ["Organization", ctx.organization],
    ["Methods", ctx.methods]
  ];
  for (const [heading, body] of parts) {
    if (body) {
      sections.push(`### ${heading}\n\n${body}`);
    }
  }
  return sections.join("\n\n");
}
412
/**
 * One-line human summary of which exocortex files were loaded.
 *
 * @param {object} ctx - Context object from readExocortex.
 * @returns {string} e.g. "attention, goals (2 files)" or a not-found notice.
 */
function summarizeExocortex(ctx) {
  if (ctx.filesLoaded === 0) return "no exocortex files found";
  const labels = [
    ["attention", ctx.attention],
    ["goals", ctx.goals],
    ["sprint", ctx.sprint],
    ["identity", ctx.identity],
    ["org", ctx.organization],
    ["methods", ctx.methods]
  ]
    .filter(([, value]) => value)
    .map(([label]) => label);
  return `${labels.join(", ")} (${ctx.filesLoaded} files)`;
}
423
+
424
+ // src/radiant/core/domain.ts
425
// Actor kinds on the "life" (human) side of the boundary.
function isLifeSide(k) {
  return k === "human" || k === "unknown";
}
// Actor kinds on the "cyber" (machine) side of the boundary.
function isCyberSide(k) {
  return k === "bot" || k === "ai";
}
/**
 * True when the two kinds sit on opposite sides of the life/cyber boundary.
 */
function crossesBoundary(a, b) {
  if (isLifeSide(a)) return isCyberSide(b);
  if (isCyberSide(a)) return isLifeSide(b);
  return false;
}
/**
 * Classify an event as "life", "cyber", or "joint".
 *
 * "joint" when the actor+coActors mix both sides, or when the event responds
 * to an actor across the boundary; otherwise the primary actor's side wins.
 *
 * @param {object} event - Event with actor, optional coActors and respondsTo.
 * @returns {"life"|"cyber"|"joint"}
 */
function classifyActorDomain(event) {
  const kinds = [event.actor.kind];
  for (const co of event.coActors ?? []) {
    kinds.push(co.kind);
  }
  const hasLife = kinds.some(isLifeSide);
  const hasCyber = kinds.some(isCyberSide);
  if (hasLife && hasCyber) {
    return "joint";
  }
  const reply = event.respondsTo;
  if (reply && crossesBoundary(event.actor.kind, reply.actor.kind)) {
    return "joint";
  }
  return isCyberSide(event.actor.kind) ? "cyber" : "life";
}
448
+
449
+ // src/radiant/core/signals.ts
450
/**
 * Tag each raw event with its life/cyber/joint domain classification.
 *
 * @param {Array<object>} events - Raw events.
 * @returns {Array<{event: object, domain: string}>}
 */
function classifyEvents(events) {
  return events.map((event) => ({
    event,
    domain: classifyActorDomain(event)
  }));
}
/**
 * Run every extractor against every domain and flatten the results.
 *
 * Output order is extractor-major, then life/cyber/joint per extractor —
 * identical to the original nested-loop order.
 *
 * @param {Array<{event: object, domain: string}>} events - Classified events.
 * @param {Array<object>} [extractors] - Signal extractors (id + extract()).
 * @returns {Array<{id, domain, score, eventCount, confidence}>}
 */
function extractSignals(events, extractors = DEFAULT_SIGNAL_EXTRACTORS) {
  const domains = ["life", "cyber", "joint"];
  return extractors.flatMap((extractor) =>
    domains.map((domain) => {
      const { score, eventCount, confidence } = extractor.extract(events, domain);
      return { id: extractor.id, domain, score, eventCount, confidence };
    })
  );
}
473
// Sentinel result for a domain with no events at all.
var ZERO = { score: 0, eventCount: 0, confidence: 0 };
// Keep only the classified events belonging to one domain.
function inDomain(events, domain) {
  const matches = [];
  for (const e of events) {
    if (e.domain === domain) matches.push(e);
  }
  return matches;
}
// Linear confidence ramp: 10+ events = full confidence.
function confidenceFromCount(count) {
  return Math.min(count / 10, 1);
}
// Clamp a score into the [0, 100] range.
function clamp100(n) {
  return Math.min(100, Math.max(0, n));
}
// Scores event informativeness: content length, saturating at 200 chars.
var CLARITY_EXTRACTOR = {
  id: "clarity",
  description: "Informativeness of event content \u2014 commit messages, PR bodies, review text",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    let total = 0;
    for (const e of sub) {
      const len = (e.event.content ?? "").length;
      total += (Math.min(len, 200) / 200) * 100;
    }
    return {
      score: clamp100(total / sub.length),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Scores accountability: share of events whose primary actor is known.
var OWNERSHIP_EXTRACTOR = {
  id: "ownership",
  description: "Clarity of accountability \u2014 fraction of events with a known primary actor",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    let attributed = 0;
    for (const e of sub) {
      if (e.event.actor.kind !== "unknown") attributed++;
    }
    return {
      score: clamp100((attributed / sub.length) * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Scores follow-through: share of this domain's events later referenced by
// ANY event (referenced ids are collected across all domains).
var FOLLOW_THROUGH_EXTRACTOR = {
  id: "follow_through",
  description: "Fraction of events that were followed up \u2014 i.e. referenced by a later event",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    const referenced = new Set(
      events.map((e) => e.event.respondsTo?.eventId).filter((id) => id)
    );
    const followedUp = sub.filter((e) => referenced.has(e.event.id)).length;
    return {
      score: clamp100((followedUp / sub.length) * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Scores coordination: share of events that reference a prior event.
var ALIGNMENT_EXTRACTOR = {
  id: "alignment",
  description: "Coordination pressure \u2014 fraction of events that reference a prior event",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    let referencing = 0;
    for (const e of sub) {
      if (e.event.respondsTo !== undefined) referencing++;
    }
    return {
      score: clamp100((referencing / sub.length) * 100),
      eventCount: sub.length,
      confidence: confidenceFromCount(sub.length)
    };
  }
};
// Scores activity rate: events per day over the observed span, capped at
// 10/day = 100. A lone event gets a token score of 20.
var DECISION_MOMENTUM_EXTRACTOR = {
  id: "decision_momentum",
  description: "Rate of activity in this domain \u2014 events per day, capped at 10/day",
  extract(events, domain) {
    const sub = inDomain(events, domain);
    if (sub.length === 0) return ZERO;
    const eventCount = sub.length;
    const confidence = confidenceFromCount(eventCount);
    if (eventCount < 2) {
      // Single event: some motion, but no measurable rate.
      return { score: 20, eventCount, confidence };
    }
    const stamps = sub.map((e) => Date.parse(e.event.timestamp)).sort((a, b) => a - b);
    // Span floor of one hour avoids divide-by-near-zero on burst activity.
    const spanDays = Math.max((stamps[stamps.length - 1] - stamps[0]) / (24 * 60 * 60 * 1e3), 1 / 24);
    const perDay = eventCount / spanDays;
    return {
      score: clamp100((Math.min(perDay, 10) / 10) * 100),
      eventCount,
      confidence
    };
  }
};
// The standard extractor battery, in reporting order.
var DEFAULT_SIGNAL_EXTRACTORS = Object.freeze([
  CLARITY_EXTRACTOR,
  OWNERSHIP_EXTRACTOR,
  FOLLOW_THROUGH_EXTRACTOR,
  ALIGNMENT_EXTRACTOR,
  DECISION_MOMENTUM_EXTRACTOR
]);
583
+
584
+ // src/radiant/types.ts
585
// Default evidence gate: a signal counts only with >= 3 events at >= 0.5
// confidence.
var DEFAULT_EVIDENCE_GATE = { k: 3, c: 0.5 };
// Type guard: a numeric score (vs a string sentinel).
function isScored(s) {
  return typeof s === "number";
}
// Type guard: a sentinel such as "INSUFFICIENT_EVIDENCE" / "UNAVAILABLE".
function isSentinel(s) {
  return typeof s === "string";
}

// src/radiant/core/math.ts
// True when an observation clears the evidence gate.
function isPresent(o, gate = DEFAULT_EVIDENCE_GATE) {
  return o.eventCount >= gate.k && o.confidence >= gate.c;
}
/**
 * Mean score of the observations that clear the gate, or the
 * "INSUFFICIENT_EVIDENCE" sentinel when none do.
 */
function presenceAverage(items, gate = DEFAULT_EVIDENCE_GATE) {
  let sum = 0;
  let count = 0;
  for (const item of items) {
    if (isPresent(item, gate)) {
      sum += item.score;
      count++;
    }
  }
  return count === 0 ? "INSUFFICIENT_EVIDENCE" : sum / count;
}
// A_L: presence-gated average over the life capability's dimensions.
function scoreLife(capability, gate = DEFAULT_EVIDENCE_GATE) {
  return presenceAverage(capability.dimensions, gate);
}
// A_C: presence-gated average over the cyber capability's dimensions.
function scoreCyber(capability, gate = DEFAULT_EVIDENCE_GATE) {
  return presenceAverage(capability.dimensions, gate);
}
// A_N: "UNAVAILABLE" without a worldmodel, otherwise the gated average.
function scoreNeuroVerse(components, worldmodelLoaded, gate = DEFAULT_EVIDENCE_GATE) {
  if (!worldmodelLoaded) return "UNAVAILABLE";
  return presenceAverage(components, gate);
}
/**
 * Composite R: mean of whichever of A_L / A_C / A_N are numeric, or the
 * "INSUFFICIENT_EVIDENCE" sentinel when all three are sentinels.
 */
function scoreComposite(a_L, a_C, a_N) {
  const scored = [a_L, a_C, a_N].filter(isScored);
  if (scored.length === 0) return "INSUFFICIENT_EVIDENCE";
  return scored.reduce((sum, v) => sum + v, 0) / scored.length;
}
621
+
622
+ // src/radiant/core/patterns.ts
623
/**
 * Ask the AI to interpret the measured signals/events and parse its reply.
 *
 * @param {object} input - Carries signals, events, lens, worldmodelContent,
 *   optional canonicalPatterns/statedIntent, and an `ai.complete` adapter.
 * @returns {Promise<object>} Parsed patterns, meaning, move, plus the raw
 *   AI response for auditing.
 */
async function interpretPatterns(input) {
  const prompt = buildInterpretationPrompt(input);
  const raw = await input.ai.complete(prompt, "Analyze the activity and produce the read.");
  const { patterns, meaning, move } = parseInterpretation(raw, input.canonicalPatterns ?? []);
  return { patterns, meaning, move, raw_ai_response: raw };
}
634
/**
 * Build the interpretation prompt: worldmodel context, the measured signal
 * matrix and event sample, reasoning guidance from the lens's frame, voice
 * and jargon-translation instructions, a JSON output schema, and hard rules.
 *
 * @param {object} input - Carries signals, events, lens, worldmodelContent,
 *   and optional canonicalPatterns / statedIntent.
 * @returns {string} Full prompt text.
 */
function buildInterpretationPrompt(input) {
  const signalSummary = formatSignalSummary(input.signals);
  // Only the most recent 30 events are shown to the model.
  const eventSample = formatEventSample(input.events, 30);
  const canonicalList = (input.canonicalPatterns ?? []).length > 0 ? `Patterns the organization has already named (use these names if you see them):
${input.canonicalPatterns.map((p) => `- ${p}`).join("\n")}` : "No patterns have been named yet. Everything you observe is new.";
  const frame = input.lens.primary_frame;
  const evalQuestions = frame.evaluation_questions.map((q, i) => `${i + 1}. ${q}`).join("\n");
  const forbiddenList = input.lens.forbidden_phrases.map((p) => `- "${p}"`).join("\n");
  // Two-column table: internal term → plain-English rendering.
  const jargonTable = Object.entries(input.lens.vocabulary.jargon_translations).map(([internal, plain]) => `  "${internal}" \u2192 "${plain}"`).join("\n");
  return `You are a behavioral intelligence system reading team activity and producing a read for the reader who needs to act on it.

## Context the reader has loaded

${input.worldmodelContent}

## What happened this window

### Signal matrix (what Radiant measured)

${signalSummary}

### Recent events (sample)

${eventSample}

## How to reason

Reason through these questions INTERNALLY \u2014 do not list them in your output:

${evalQuestions}

Scoring rubric: ${frame.scoring_rubric}

${canonicalList}

${input.statedIntent ? input.statedIntent + "\n" : ""}## Voice: speak like an Auki builder, not like a status report

The reader wants to know **what this means and what to do**, not "what happened." Frame every observation as consequence + implication, not just description.

Wrong voice (status report):
"Rapid deployment of complex technical architecture through composable commits."
"Signal extraction across life, cyber, and joint domains enables consistent behavioral analysis."
"Decision momentum scores suggest architectural delivery without corresponding strategic direction setting."

Right voice (Auki builder):
"Shipping pace is high. The architecture is getting ahead of strategic decisions \u2014 velocity without a declared target."
"Every pattern is new. Nothing is being tracked by name yet. That's fine for now; it becomes a problem when patterns repeat and you still don't have vocabulary for them."
"The work is converging across three modules. The story of HOW they compose isn't being told yet."

The difference: consequence in plain English, not observation in system vocabulary.

## Translate internal jargon to plain English

Readers don't know Radiant's vocabulary. Before ANY description appears in your output, translate these:

${jargonTable}

For example: don't say "update the worldmodel." Say "add a line to your strategy file."

## Health is a valid read

If the activity is healthy and aligned with the worldmodel, SAY SO. Don't fabricate problems. Over-prescription is a voice failure. Legitimate outputs include:

"Nothing's broken. Keep shipping."
"This is what healthy looks like \u2014 the invariants are holding."
"Nothing here needs action."

Only recommend a move when the evidence actually calls for one.

## Output schema \u2014 JSON object

\`\`\`json
{
  "patterns": [
    {
      "name": "pattern_name_snake_case",
      "type": "canonical" | "candidate",
      "description": "Consequence-framed, plain-English, 1-2 sentences. The reader understands why this matters, not just what you observed.",
      "evidence": {
        "signals": ["signal_id.domain", ...],
        "events": ["event_id", ...],
        "cited_invariant": "invariant_name_or_null"
      },
      "confidence": 0.0 to 1.0
    }
  ],
  "meaning": "3-5 sentences. Weave the patterns into ONE strategic thesis. Compress. The reader should finish this paragraph and understand the one thing that matters most in this read. Plain English \u2014 no system jargon.",
  "move": "1-3 direct imperatives, OR explicit 'nothing to act on' if the read is healthy. Do not fabricate urgency. Examples: 'Force cross-module ownership this sprint.' / 'Nothing's broken. Keep shipping.' / 'If you want future reads to track this pattern by name, add a line to your strategy file.'"
}
\`\`\`

## Hard rules

- Every signal you cite MUST appear in the signal matrix above
- Every event you cite MUST appear in the events sample above
- Do not invent signals or events that aren't in the data
- Candidate patterns must have type "candidate"
- No hedging, no hype vocabulary
- Apply jargon translation before output
- Health-is-valid \u2014 don't invent problems
- Return ONLY the JSON object, no other text

Do NOT use these phrases anywhere in your output:
${forbiddenList}`;
}
739
/**
 * Render the signal matrix as text, grouped by domain.
 *
 * Each row is marked "✓" when it clears the default evidence gate
 * (>= 3 events at >= 0.5 confidence) and "○" otherwise. Domains with no
 * signals are omitted entirely.
 *
 * @param {Array<{id, domain, score, eventCount, confidence}>} signals
 * @returns {string}
 */
function formatSignalSummary(signals) {
  const out = [];
  for (const domain of ["life", "cyber", "joint"]) {
    const rows = signals.filter((s) => s.domain === domain);
    if (rows.length === 0) continue;
    out.push(`### ${domain}`);
    for (const s of rows) {
      const marker = s.eventCount >= 3 && s.confidence >= 0.5 ? "\u2713" : "\u25CB";
      out.push(
        `  ${marker} ${s.id}: score=${s.score.toFixed(1)}, events=${s.eventCount}, conf=${s.confidence.toFixed(2)}`
      );
    }
  }
  return out.join("\n");
}
755
/**
 * Render the last `maxEvents` classified events as a bulleted text sample.
 *
 * Each entry shows domain, id, actor, event kind, an optional
 * "(responds to …)" link, and the first 200 chars of content on a second line.
 *
 * @param {Array<{event: object, domain: string}>} events - Classified events.
 * @param {number} maxEvents - Number of trailing events to include.
 * @returns {string}
 */
function formatEventSample(events, maxEvents) {
  const lines = [];
  for (const e of events.slice(-maxEvents)) {
    const content = (e.event.content ?? "").slice(0, 200);
    const respondsTo = e.event.respondsTo ? ` (responds to ${e.event.respondsTo.eventId})` : "";
    lines.push(`- [${e.domain}] ${e.event.id} | ${e.event.actor.kind}:${e.event.actor.id} | ${e.event.kind ?? "event"}${respondsTo}
  "${content}"`);
  }
  return lines.join("\n");
}
764
/**
 * Parse the AI's interpretation reply into { patterns, meaning, move }.
 *
 * First tries to extract a JSON object containing "patterns"; if that yields
 * no patterns, falls back to salvaging a bare JSON array. Each pattern-like
 * item is normalized: name/description stringified, confidence clamped to
 * [0, 1] (default 0.5), and type promoted to "canonical" when either declared
 * so or matching a canonical name case-insensitively.
 *
 * @param {string} raw - Raw AI response text.
 * @param {string[]} canonicalNames - Already-named canonical patterns.
 * @returns {{patterns: Array<object>, meaning: string, move: string}}
 */
function parseInterpretation(raw, canonicalNames) {
  let meaning = "";
  let move = "";
  let rawPatterns = [];
  const objMatch = raw.match(/\{[\s\S]*"patterns"[\s\S]*\}/);
  if (objMatch) {
    try {
      const parsed = JSON.parse(objMatch[0]);
      if (Array.isArray(parsed.patterns)) rawPatterns = parsed.patterns;
      if (typeof parsed.meaning === "string") meaning = parsed.meaning;
      if (typeof parsed.move === "string") move = parsed.move;
    } catch {
      // malformed object — fall through to the bare-array salvage below
    }
  }
  if (rawPatterns.length === 0) {
    const arrMatch = raw.match(/\[[\s\S]*\]/);
    if (arrMatch) {
      try {
        const parsed = JSON.parse(arrMatch[0]);
        if (Array.isArray(parsed)) rawPatterns = parsed;
      } catch {
        // unparseable — leave rawPatterns empty
      }
    }
  }
  const canonicalSet = new Set(canonicalNames.map((n) => n.toLowerCase()));
  const patterns = rawPatterns.filter(isPatternLike).map((item) => {
    const nameStr = String(item.name ?? "unnamed");
    const ev = item.evidence;
    const isCanonical = item.type === "canonical" || canonicalSet.has(nameStr.toLowerCase());
    return {
      name: nameStr,
      type: isCanonical ? "canonical" : "candidate",
      declaredAs: isCanonical ? nameStr : undefined,
      description: String(item.description ?? ""),
      evidence: {
        signals: Array.isArray(ev?.signals) ? ev.signals.map(String) : [],
        events: Array.isArray(ev?.events) ? ev.events.map(String) : [],
        cited_invariant: ev?.cited_invariant ? String(ev.cited_invariant) : undefined
      },
      confidence: typeof item.confidence === "number" ? Math.max(0, Math.min(1, item.confidence)) : 0.5
    };
  });
  return { patterns, meaning, move };
}
// Minimal shape check for one pattern entry: a non-null object with a name key.
function isPatternLike(x) {
  return typeof x === "object" && x !== null && "name" in x;
}
815
+
816
+ // src/radiant/core/renderer.ts
817
/**
 * Produce both output representations of a read: the human-readable text and
 * the machine-readable frontmatter. Text is rendered first, as before.
 *
 * @param {object} input - Render input (scope, patterns, scores, lens, …).
 * @returns {{text: string, frontmatter: object}}
 */
function render(input) {
  return {
    text: renderText(input),
    frontmatter: renderFrontmatter(input)
  };
}
822
/**
 * Render the human-readable read: header, EMERGENT patterns (canonical then
 * candidates), MEANING, MOVE, ALIGNMENT scores, and the DEPTH footer,
 * joined by blank lines.
 *
 * @param {object} input - Carries scope, windowDays, eventCount, lens,
 *   patterns, meaning, move, scores {A_L, A_C, A_N, R}, priorReadCount.
 * @returns {string}
 */
function renderText(input) {
  const sections = [];
  // Header: scope, window, lens.
  sections.push(
    `Scope: ${formatScope(input.scope)}
Window: last ${input.windowDays} days \xB7 ${input.eventCount} events
Lens: ${input.lens.name}`
  );
  if (input.patterns.length > 0) {
    const canonical = input.patterns.filter((p) => p.type === "canonical");
    const candidates = input.patterns.filter((p) => p.type === "candidate");
    let emergentBlock = "EMERGENT\n";
    // Canonical patterns first, each as name + indented description.
    if (canonical.length > 0) {
      for (const p of canonical) {
        emergentBlock += `
  ${p.name}
`;
        emergentBlock += `    ${p.description}
`;
      }
    }
    // Candidate patterns under their own sub-header, with optional invariant.
    if (candidates.length > 0) {
      emergentBlock += "\n  Emergent (candidates \u2014 not yet in worldmodel)\n";
      for (const p of candidates) {
        emergentBlock += `
  ${p.name} (candidate)
`;
        emergentBlock += `    ${p.description}
`;
        if (p.evidence.cited_invariant) {
          emergentBlock += `    Cited invariant: ${p.evidence.cited_invariant}
`;
        }
      }
    }
    sections.push(emergentBlock.trimEnd());
  }
  // MEANING / MOVE: body re-indented two spaces per line.
  if (input.meaning) {
    sections.push(`MEANING

  ${input.meaning.split("\n").join("\n  ")}`);
  }
  if (input.move) {
    sections.push(`MOVE

  ${input.move.split("\n").join("\n  ")}`);
  }
  const alignBlock = [
    "ALIGNMENT",
    "",
    `  Human work: ${formatScore(input.scores.A_L)}`,
    `  AI work: ${formatScore(input.scores.A_C)}`,
    `  Human\u2013AI collaboration: ${formatScore(input.scores.A_N)}`,
    `  Composite: ${formatScore(input.scores.R)}`
  ].join("\n");
  sections.push(alignBlock);
  sections.push(renderDepth(input.priorReadCount ?? 0, input.windowDays));
  return sections.join("\n\n");
}
880
/**
 * Render the DEPTH footer: what Radiant can and cannot do given how many
 * prior reads exist for this scope.
 *
 * Three tiers: first read (no baseline), reads 2-4 (baseline forming),
 * and 5+ (baseline established).
 *
 * @param {number} priorReads - Number of prior reads of this scope.
 * @param {number} windowDays - Length of the current activity window.
 * @returns {string}
 */
function renderDepth(priorReads, windowDays) {
  // Tier 1: first read — explain what unlocks with history.
  if (priorReads === 0) {
    return [
      "DEPTH",
      "",
      `  This is your first read. Radiant sees ${windowDays} days of activity`,
      "  but has no prior baseline to compare against.",
      "",
      "  Available now:",
      "  \u2713 Signal extraction across life / cyber / joint domains",
      "  \u2713 Pattern identification (canonical + candidates)",
      "  \u2713 Alignment scoring",
      "",
      "  Available after 2+ reads:",
      "  \xB7 Drift detection (is alignment improving or degrading?)",
      '  \xB7 Baselines (what does "normal" look like for this team?)',
      "  \xB7 Pattern confidence (are these patterns persistent or noise?)",
      "  \xB7 Evolution proposals (should the worldmodel adapt?)",
      "",
      "  Run again next week. The read gets sharper every time."
    ].join("\n");
  }
  // Tier 2: baseline still forming (reads 2 through 4).
  if (priorReads < 4) {
    return [
      "DEPTH",
      "",
      `  Read ${priorReads + 1} of this scope. Baseline forming.`,
      "",
      "  Available now:",
      "  \u2713 Signal extraction + pattern identification + alignment scoring",
      `  \u2713 Drift detection (comparing against ${priorReads} prior read${priorReads > 1 ? "s" : ""})`,
      "  \xB7 Baselines stabilizing (need 4+ reads for reliable averages)",
      "  \xB7 Pattern confidence accumulating",
      "",
      "  The read sharpens with each run."
    ].join("\n");
  }
  // Tier 3: baseline established (5th read onward).
  return [
    "DEPTH",
    "",
    `  Read ${priorReads + 1} of this scope. Baseline established.`,
    "",
    "  Available:",
    "  \u2713 Signal extraction + pattern identification + alignment scoring",
    "  \u2713 Drift detection against established baseline",
    "  \u2713 Pattern confidence (persistent vs noise)",
    "  \u2713 Evolution proposals (candidate patterns with enough history to evaluate)"
  ].join("\n");
}
929
/**
 * Format a score for human display: numeric scores become "NN · LABEL",
 * sentinel (non-scored) values become an explanatory phrase.
 *
 * @param {number|string} s - Score value or sentinel (e.g. "UNAVAILABLE").
 * @returns {string} Display string.
 */
function formatScore(s) {
  // Sentinel values carry their own explanation rather than a number.
  if (!isScored(s)) {
    return s === "UNAVAILABLE"
      ? "not available (no worldmodel loaded)"
      : "not enough signal to call yet";
  }
  const n = Math.round(s);
  // Descending band floors; first floor the rounded score reaches wins.
  const bands = [
    [75, "STRONG"],
    [60, "STABLE"],
    [45, "needs attention"],
    [30, "concerning"]
  ];
  const hit = bands.find(([floor]) => n >= floor);
  const label = hit ? hit[1] : "critical";
  return `${n} \xB7 ${label}`;
}
943
/**
 * Build the YAML frontmatter block ("---" fenced) summarizing a read:
 * scope/window/lens metadata, event total, per-domain signals, rounded
 * scores (or sentinel strings), and per-pattern evidence entries.
 *
 * @param {object} input - Render input (scope, windowDays, lens, signals,
 *   scores, patterns, eventCount).
 * @returns {string} Frontmatter text, opening and closing with "---".
 */
function renderFrontmatter(input) {
  // Numeric scores are rounded; non-scored sentinels are stringified so the
  // YAML stays readable.
  const yamlScore = (v) => (isScored(v) ? Math.round(v) : String(v));
  const patternEntries = input.patterns.map((p) => ({
    name: p.name,
    type: p.type,
    conf: Number(p.confidence.toFixed(2)),
    evidence_signals: p.evidence.signals,
    evidence_events: p.evidence.events,
    // cited_invariant is included only when present on the pattern.
    ...(p.evidence.cited_invariant
      ? { cited_invariant: p.evidence.cited_invariant }
      : {})
  }));
  const frontmatter = {
    radiant_read: {
      scope: formatScope(input.scope),
      window: `${input.windowDays}d`,
      timestamp: new Date().toISOString(),
      lens: input.lens.name
    },
    events: {
      total: input.eventCount
    },
    signals: groupSignalsByDomain(input.signals),
    scores: {
      A_L: yamlScore(input.scores.A_L),
      A_C: yamlScore(input.scores.A_C),
      A_N: yamlScore(input.scores.A_N),
      R: yamlScore(input.scores.R)
    },
    patterns: patternEntries
  };
  return "---\n" + serializeYAML(frontmatter) + "---";
}
980
/**
 * Group signals by their domain, keyed by signal id, rounding score to one
 * decimal and confidence to two for compact YAML output.
 *
 * @param {Array<{domain: string, id: string, score: number, eventCount: number, confidence: number}>} signals
 * @returns {Object<string, Object<string, {score: number, n: number, conf: number}>>}
 */
function groupSignalsByDomain(signals) {
  const byDomain = {};
  for (const signal of signals) {
    const bucket = (byDomain[signal.domain] ??= {});
    bucket[signal.id] = {
      score: Number(signal.score.toFixed(1)),
      n: signal.eventCount,
      conf: Number(signal.confidence.toFixed(2))
    };
  }
  return byDomain;
}
992
/**
 * Minimal YAML serializer for the flat frontmatter structures built in this
 * module. Strings are JSON-quoted, scalars printed as-is, scalar-only arrays
 * rendered inline (flow style), and objects / object-arrays expanded one
 * level per recursive call.
 *
 * NOTE(review): nested objects inside array entries are emitted via
 * `.trim()` of their multi-line form, which flattens them; callers here only
 * pass flat entry objects — confirm before reusing for deeper structures.
 *
 * @param {*} obj - Value to serialize.
 * @param {number} [indent=0] - Current indent depth (pad units).
 * @returns {string} YAML fragment, always newline-terminated.
 */
function serializeYAML(obj, indent = 0) {
  const pad = " ".repeat(indent);
  // null and undefined both render as YAML null.
  if (obj == null) return "null\n";
  if (typeof obj === "string") return JSON.stringify(obj) + "\n";
  if (typeof obj === "number" || typeof obj === "boolean") return String(obj) + "\n";
  if (Array.isArray(obj)) {
    if (obj.length === 0) return "[]\n";
    const isInlineScalar = (v) => typeof v === "string" || typeof v === "number";
    if (obj.every(isInlineScalar)) {
      // Flow style for scalar-only arrays: ["a", 1]
      return "[" + obj.map((v) => JSON.stringify(v)).join(", ") + "]\n";
    }
    const parts = ["\n"];
    for (const item of obj) {
      if (typeof item === "object" && item !== null && !Array.isArray(item)) {
        // Object entry: first key shares the "- " marker line; remaining
        // keys follow on continuation lines.
        const [head, ...tail] = Object.entries(item);
        parts.push(`${pad}- ${head[0]}: ${serializeYAML(head[1], 0).trim()}\n`);
        for (const [k, v] of tail) {
          parts.push(`${pad} ${k}: ${serializeYAML(v, indent + 2).trim()}\n`);
        }
      } else {
        parts.push(`${pad}- ${serializeYAML(item, indent + 1).trim()}\n`);
      }
    }
    return parts.join("");
  }
  if (typeof obj === "object") {
    const entries = Object.entries(obj);
    if (entries.length === 0) return "{}\n";
    const parts = ["\n"];
    for (const [key, value] of entries) {
      if (typeof value === "object" && value !== null) {
        // Nested containers start on the next line; the recursive call
        // emits its own leading newline.
        parts.push(`${pad}${key}:${serializeYAML(value, indent + 1)}`);
      } else {
        parts.push(`${pad}${key}: ${serializeYAML(value, indent).trim()}\n`);
      }
    }
    return parts.join("");
  }
  // Fallback for any remaining type (e.g. bigint).
  return `${obj}\n`;
}
1039
+
1040
+ // src/radiant/commands/emergent.ts
1041
/**
 * Run a full "emergent" read for a scope: fetch GitHub activity, classify
 * events, extract signals, compute alignment scores, interpret patterns
 * through the chosen lens, and render the report plus YAML frontmatter.
 *
 * @param {object} input - Command input: lensId, scope, githubToken,
 *   worldmodelContent, ai, canonicalPatterns, and optionally windowDays
 *   (defaults to 14) and exocortexPath.
 * @returns {Promise<object>} Rendered text/frontmatter, voice-violation
 *   report, signals, scores, and the raw event count.
 */
async function emergent(input) {
  const lens = resolveLens2(input.lensId);
  const windowDays = input.windowDays ?? 14;
  // Optional exocortex file: when provided, its formatted contents become
  // the "stated intent" handed to pattern interpretation.
  let statedIntent;
  if (input.exocortexPath) {
    const formatted = formatExocortexForPrompt(readExocortex(input.exocortexPath));
    if (formatted) statedIntent = formatted;
  }
  const events = await fetchGitHubActivity(input.scope, input.githubToken, {
    windowDays
  });
  const classified = classifyEvents(events);
  const signals = extractSignals(classified);
  // A non-empty worldmodel counts as "loaded" for scoring purposes.
  const scores = computeScores(signals, input.worldmodelContent !== "");
  const { patterns, meaning, move } = await interpretPatterns({
    signals,
    events: classified,
    worldmodelContent: input.worldmodelContent,
    lens,
    ai: input.ai,
    canonicalPatterns: input.canonicalPatterns,
    statedIntent
  });
  // Patterns pass through the lens voice, then the combined descriptions
  // are screened against the lens's forbidden phrases.
  const rewrittenPatterns = patterns.map((p) => lens.rewrite(p));
  const voiceViolations = checkForbiddenPhrases(
    lens,
    rewrittenPatterns.map((p) => p.description).join("\n")
  );
  const rendered = render({
    scope: input.scope,
    windowDays,
    eventCount: events.length,
    signals,
    patterns: rewrittenPatterns,
    scores,
    lens,
    meaning: meaning || undefined,
    move: move || undefined
  });
  return {
    text: rendered.text,
    frontmatter: rendered.frontmatter,
    voiceViolations,
    voiceClean: voiceViolations.length === 0,
    signals,
    scores,
    eventCount: events.length
  };
}
1090
/**
 * Compute the four alignment scores from extracted signals:
 * A_L (life domain), A_C (cyber domain), A_N (joint/bridging domain),
 * and R (composite of the three).
 *
 * @param {Array<object>} signals - Extracted signals with a `domain` field.
 * @param {boolean} worldmodelLoaded - Whether a worldmodel is loaded
 *   (forwarded to the bridging scorer).
 * @returns {{A_L: *, A_C: *, A_N: *, R: *}} Score bundle.
 */
function computeScores(signals, worldmodelLoaded) {
  const gate = DEFAULT_EVIDENCE_GATE;
  const byDomain = (domain) => signals.filter((s) => s.domain === domain);
  const A_L = scoreLife(
    { dimensions: byDomain("life").map(signalToDimension) },
    gate
  );
  const A_C = scoreCyber(
    { dimensions: byDomain("cyber").map(signalToDimension) },
    gate
  );
  const A_N = scoreNeuroVerse(
    byDomain("joint").map(signalToBridging),
    worldmodelLoaded,
    gate
  );
  return { A_L, A_C, A_N, R: scoreComposite(A_L, A_C, A_N) };
}
1111
/**
 * Project a signal onto the dimension shape expected by the domain scorers
 * (id, score, eventCount, confidence — any other fields are dropped).
 *
 * @param {{id: string, score: number, eventCount: number, confidence: number}} s
 * @returns {{id: string, score: number, eventCount: number, confidence: number}}
 */
function signalToDimension(s) {
  const { id, score, eventCount, confidence } = s;
  return { id, score, eventCount, confidence };
}
1119
/**
 * Project a joint-domain signal onto the bridging shape expected by the
 * NeuroVerse scorer. All joint signals are attributed to the ALIGN
 * component as a proxy.
 *
 * @param {{score: number, eventCount: number, confidence: number}} s
 * @returns {{component: string, score: number, eventCount: number, confidence: number}}
 */
function signalToBridging(s) {
  const { score, eventCount, confidence } = s;
  return { component: "ALIGN", score, eventCount, confidence };
}
1128
/**
 * Look up a registered lens by id.
 *
 * @param {string} id - Lens identifier.
 * @returns {object} The registered lens.
 * @throws {Error} When no lens is registered under `id`.
 */
function resolveLens2(id) {
  const lens = getLens(id);
  if (lens) return lens;
  throw new Error(
    `Lens "${id}" not found. Check the id or register the lens.`
  );
}
1137
+
1138
+ // src/radiant/core/ai.ts
1139
/**
 * Create an AI adapter backed by the Anthropic Messages API.
 *
 * @param {string} apiKey - Anthropic API key (sent as x-api-key).
 * @param {string} [model] - Model id; defaults to claude-sonnet-4-20250514.
 * @param {number} [maxTokens=4096] - max_tokens for each request.
 * @returns {{complete: (systemPrompt: string, userQuery: string) => Promise<string>}}
 *   Adapter whose `complete` resolves to the concatenated text blocks of the
 *   response, or rejects on HTTP errors / empty text content.
 */
function createAnthropicAI(apiKey, model = "claude-sonnet-4-20250514", maxTokens = 4096) {
  return {
    async complete(systemPrompt, userQuery) {
      const payload = {
        model,
        max_tokens: maxTokens,
        system: systemPrompt,
        messages: [{ role: "user", content: userQuery }]
      };
      const res = await fetch("https://api.anthropic.com/v1/messages", {
        method: "POST",
        headers: {
          "x-api-key": apiKey,
          "anthropic-version": "2023-06-01",
          "content-type": "application/json"
        },
        body: JSON.stringify(payload)
      });
      if (!res.ok) {
        // Include a bounded slice of the body so error logs stay readable.
        const body = await res.text();
        throw new Error(
          `Anthropic API error ${res.status}: ${body.slice(0, 500)}`
        );
      }
      const data = await res.json();
      // Concatenate only text-type content blocks; other block types
      // (e.g. tool use) are ignored.
      const text = (data.content ?? [])
        .filter((c) => c.type === "text")
        .map((c) => c.text ?? "")
        .join("");
      if (!text) {
        throw new Error("Anthropic returned no text content");
      }
      return text;
    }
  };
}
1171
/**
 * Create a stub AI adapter whose `complete` ignores its arguments and always
 * resolves to the given canned response. Intended for tests.
 *
 * @param {string} fixedResponse - Response returned by every completion.
 * @returns {{complete: () => Promise<string>}} Mock adapter.
 */
function createMockAI(fixedResponse) {
  const complete = async () => fixedResponse;
  return { complete };
}
1178
+
1179
+ export {
1180
+ composeSystemPrompt,
1181
+ checkForbiddenPhrases,
1182
+ think,
1183
+ parseRepoScope,
1184
+ formatScope,
1185
+ fetchGitHubActivity,
1186
+ createMockGitHubAdapter,
1187
+ readExocortex,
1188
+ formatExocortexForPrompt,
1189
+ summarizeExocortex,
1190
+ classifyActorDomain,
1191
+ classifyEvents,
1192
+ extractSignals,
1193
+ DEFAULT_SIGNAL_EXTRACTORS,
1194
+ DEFAULT_EVIDENCE_GATE,
1195
+ isScored,
1196
+ isSentinel,
1197
+ isPresent,
1198
+ presenceAverage,
1199
+ scoreLife,
1200
+ scoreCyber,
1201
+ scoreNeuroVerse,
1202
+ scoreComposite,
1203
+ interpretPatterns,
1204
+ render,
1205
+ emergent,
1206
+ createAnthropicAI,
1207
+ createMockAI
1208
+ };