@aleph-ai/tinyaleph 1.3.0 → 1.4.1
- package/README.md +423 -12
- package/backends/cryptographic/index.js +455 -2
- package/core/beacon.js +735 -0
- package/core/crt-homology.js +1004 -0
- package/core/enochian-vocabulary.js +910 -0
- package/core/enochian.js +744 -0
- package/core/errors.js +587 -0
- package/core/hilbert.js +651 -1
- package/core/index.js +86 -1
- package/core/lambda.js +284 -33
- package/core/logger.js +350 -0
- package/core/prime.js +136 -1
- package/core/quaternion-semantics.js +623 -0
- package/core/reduction.js +391 -1
- package/core/rformer-crt.js +892 -0
- package/core/topology.js +655 -0
- package/docs/README.md +54 -0
- package/docs/design/PYTHON_PORT_DESIGN.md +1400 -0
- package/docs/reference/07-topology.md +257 -0
- package/docs/reference/08-observer.md +421 -0
- package/docs/reference/09-crt-homology.md +369 -0
- package/modular.js +231 -3
- package/package.json +1 -1
package/core/reduction.js
CHANGED
@@ -259,7 +259,13 @@ function isReducible(term) {
 
 /**
  * Compute the size of a term (for termination measure)
- * Definition 3 from ncpsc.pdf
+ * Definition 3 from ncpsc.pdf:
+ * - |N(p)| = 1
+ * - |A(p)| = 1
+ * - |FUSE(p,q,r)| = 1
+ * - |A(p₁)...A(pₖ)N(q)| = k + 1
+ * - |S₁ ∘ S₂| = |S₁| + |S₂|
+ * - |S₁ ⇒ S₂| = |S₁| + |S₂|
  */
 function termSize(term) {
   if (term instanceof NounTerm) return 1;
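The expanded docstring is the diff's first substantive change: it spells out the size measure that the termination argument rests on. As a sanity check, here is a minimal standalone sketch of Definition 3 over plain tagged objects (the term classes themselves are outside this diff), with one worked size:

```js
// Minimal mirror of Definition 3; plain objects stand in for the term classes.
const size = (t) => {
  switch (t.kind) {
    case 'noun':   // |N(p)| = 1
    case 'adj':    // |A(p)| = 1
    case 'fuse':   // |FUSE(p,q,r)| = 1
      return 1;
    case 'chain':  // |A(p₁)...A(pₖ)N(q)| = k + 1
      return t.operators.length + 1;
    case 'seq':    // |S₁ ∘ S₂| = |S₁| + |S₂|
    case 'impl':   // |S₁ ⇒ S₂| = |S₁| + |S₂|
      return size(t.left) + size(t.right);
    default:
      return 1;
  }
};

// |A(3)A(5)N(7) ∘ FUSE(3,5,11)| = (2 + 1) + 1 = 4
const chain = { kind: 'chain', operators: [{ prime: 3 }, { prime: 5 }], noun: { prime: 7 } };
console.log(size({ kind: 'seq', left: chain, right: { kind: 'fuse' } })); // 4
```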
@@ -272,6 +278,215 @@ function termSize(term) {
   return 1;
 }
 
+/**
+ * Compute the depth of a term (nesting level)
+ * Used for complexity analysis
+ */
+function termDepth(term) {
+  if (term instanceof NounTerm) return 0;
+  if (term instanceof AdjTerm) return 0;
+  if (term instanceof FusionTerm) return 1;
+  if (term instanceof ChainTerm) return term.operators.length;
+  if (term instanceof NounSentence) return 1 + termDepth(term.expr);
+  if (term instanceof SeqSentence) return 1 + Math.max(termDepth(term.left), termDepth(term.right));
+  if (term instanceof ImplSentence) return 1 + Math.max(termDepth(term.antecedent), termDepth(term.consequent));
+  return 0;
+}
+
+/**
+ * Extract all primes from a term
+ * Used for route analysis
+ */
+function extractPrimes(term) {
+  if (term instanceof NounTerm) return [term.prime];
+  if (term instanceof AdjTerm) return [term.prime];
+  if (term instanceof FusionTerm) return [term.p, term.q, term.r, term.getFusedPrime()];
+  if (term instanceof ChainTerm) {
+    const primes = term.operators.map(op => op.prime);
+    primes.push(term.noun.prime);
+    return primes;
+  }
+  if (term instanceof NounSentence) return extractPrimes(term.expr);
+  if (term instanceof SeqSentence) return [...extractPrimes(term.left), ...extractPrimes(term.right)];
+  if (term instanceof ImplSentence) return [...extractPrimes(term.antecedent), ...extractPrimes(term.consequent)];
+  return [];
+}
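Taken together with termSize, these two new helpers measure different things: termDepth reports nesting (a chain's depth is its operator count; sentences add one per level), while extractPrimes flattens a term to the primes it mentions, fused prime included. A hypothetical call sketch — the constructor signatures are assumed, since they are not shown in this diff:

```js
// Expected behavior on a chain term A(3)A(5)N(7), per the definitions above.
// Hypothetical constructor calls; only the fields used by the new helpers matter:
// const chain = new ChainTerm([new AdjTerm(3), new AdjTerm(5)], new NounTerm(7));
// termSize(chain)      -> 3        (k + 1 with k = 2)
// termDepth(chain)     -> 2        (term.operators.length)
// extractPrimes(chain) -> [3, 5, 7] (operator primes, then the noun's prime)
```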
+
+// ============================================================================
+// FORMAL PROOF TRACE (from ncpsc.pdf §5)
+// ============================================================================
+
+/**
+ * ProofTrace - Generates formal proofs of normalization
+ * Implements Definition 5 from ncpsc.pdf
+ */
+class ProofTrace {
+  constructor() {
+    this.steps = [];
+    this.initialTerm = null;
+    this.finalTerm = null;
+  }
+
+  /**
+   * Record a proof step with size measurements
+   */
+  addStep(rule, before, after, justification) {
+    const sizeBefore = termSize(before);
+    const sizeAfter = termSize(after);
+
+    this.steps.push({
+      index: this.steps.length,
+      rule,
+      before: before.signature ? before.signature() : String(before),
+      after: after.signature ? after.signature() : String(after),
+      sizeBefore,
+      sizeAfter,
+      sizeDecrease: sizeBefore - sizeAfter,
+      justification,
+      timestamp: Date.now()
+    });
+  }
+
+  /**
+   * Check that all steps satisfy size decrease property
+   * Lemma 1: e → e' implies |e'| < |e|
+   */
+  verifySizeDecrease() {
+    for (const step of this.steps) {
+      if (step.sizeDecrease <= 0) {
+        return {
+          valid: false,
+          failedStep: step.index,
+          reason: `Size did not decrease: |${step.before}| = ${step.sizeBefore}, |${step.after}| = ${step.sizeAfter}`
+        };
+      }
+    }
+    return { valid: true, totalDecrease: this.getTotalSizeDecrease() };
+  }
+
+  /**
+   * Get total size decrease through reduction
+   */
+  getTotalSizeDecrease() {
+    if (this.steps.length === 0) return 0;
+    return this.steps[0].sizeBefore - this.steps[this.steps.length - 1].sizeAfter;
+  }
+
+  /**
+   * Generate LaTeX proof
+   */
+  toLatex() {
+    const lines = [];
+    lines.push('\\begin{proof}[Strong Normalization]');
+    lines.push(`\\textbf{Initial term:} $${this.initialTerm}$`);
+    lines.push('');
+    lines.push('\\textbf{Reduction sequence:}');
+    lines.push('\\begin{align*}');
+
+    for (const step of this.steps) {
+      lines.push(` & ${step.before} \\xrightarrow{\\text{${step.rule}}} ${step.after} \\quad (|\\cdot| = ${step.sizeBefore} \\to ${step.sizeAfter}) \\\\`);
+    }
+
+    lines.push('\\end{align*}');
+    lines.push('');
+    lines.push(`\\textbf{Final normal form:} $${this.finalTerm}$`);
+    lines.push('');
+    lines.push('By Lemma 1, each step strictly decreases term size.');
+    lines.push('Since size is a natural number bounded below by 1, reduction must terminate.');
+    lines.push('\\end{proof}');
+
+    return lines.join('\n');
+  }
+
+  /**
+   * Generate JSON proof certificate
+   */
+  toCertificate() {
+    const verification = this.verifySizeDecrease();
+
+    return {
+      version: '1.0',
+      type: 'strong_normalization_proof',
+      initial: this.initialTerm,
+      final: this.finalTerm,
+      steps: this.steps,
+      metrics: {
+        totalSteps: this.steps.length,
+        totalSizeDecrease: this.getTotalSizeDecrease(),
+        initialSize: this.steps.length > 0 ? this.steps[0].sizeBefore : 1,
+        finalSize: this.steps.length > 0 ? this.steps[this.steps.length - 1].sizeAfter : 1
+      },
+      verification,
+      timestamp: Date.now()
+    };
+  }
+}
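ProofTrace records each rewrite with its before/after sizes, which makes Lemma 1 checkable after the fact: sizes strictly decrease and are bounded below by 1, so a valid trace can never have more than initialSize − finalSize steps. A small consumer-side sketch of that bound over a certificate (not part of the package):

```js
// Consistency bound implied by Lemma 1, checked against a toCertificate() result:
// each step drops the size by at least 1, so steps <= initialSize - finalSize.
function checkCertificateBound(cert) {
  const { totalSteps, initialSize, finalSize } = cert.metrics;
  return cert.verification.valid &&
         finalSize >= 1 &&
         totalSteps <= initialSize - finalSize;
}
```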
+
+/**
+ * ProofGenerator - Creates formal proofs during reduction
+ */
+class ProofGenerator {
+  constructor(reducer = null) {
+    this.reducer = reducer || new ReductionSystem();
+  }
+
+  /**
+   * Generate a formal proof of normalization for a term
+   */
+  generateProof(term) {
+    const proof = new ProofTrace();
+    proof.initialTerm = term.signature ? term.signature() : String(term);
+
+    let current = term;
+    let steps = 0;
+    const maxSteps = 1000;
+
+    while (steps < maxSteps) {
+      const reductionStep = this.reducer.step(current);
+      if (!reductionStep) {
+        proof.finalTerm = current.signature ? current.signature() : String(current);
+        break;
+      }
+
+      proof.addStep(
+        reductionStep.rule,
+        reductionStep.before,
+        reductionStep.after,
+        this.getJustification(reductionStep)
+      );
+
+      current = reductionStep.after;
+      steps++;
+    }
+
+    return proof;
+  }
+
+  /**
+   * Get formal justification for a reduction step
+   */
+  getJustification(step) {
+    switch (step.rule) {
+      case 'FUSE':
+        return `FUSE-Elim: FUSE(${step.details.p}, ${step.details.q}, ${step.details.r}) = N(${step.details.sum})`;
+      case 'APPLY':
+        return `Apply-⊕: A(${step.details.operator}) ⊕ N(${step.details.operand}) = N(${step.details.result}) via ${step.details.opName}`;
+      case 'SENTENCE_INNER':
+        return 'Sentence-Reduce: inner expression reduction';
+      case 'SEQ_LEFT':
+        return 'Seq-Left: reduce left component';
+      case 'SEQ_RIGHT':
+        return 'Seq-Right: reduce right component';
+      case 'IMPL_ANTE':
+        return 'Impl-Ante: reduce antecedent';
+      case 'IMPL_CONS':
+        return 'Impl-Cons: reduce consequent';
+      default:
+        return `Rule: ${step.rule}`;
+    }
+  }
+}
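ProofGenerator drives the reducer to a normal form (capped at 1000 steps) and attaches a rule-specific justification to every step. A hypothetical usage sketch, assuming the step() contract the loop above relies on (an object with rule, before, after, details, or null at normal form):

```js
// Sketch only: the term value itself must come from the package's constructors,
// which are outside this diff.
const gen = new ProofGenerator();       // uses a fresh ReductionSystem by default
// const proof = gen.generateProof(term);
// proof.verifySizeDecrease();  // -> { valid: true, totalDecrease: ... }
// proof.toCertificate();       // -> JSON certificate with metrics + verification
// proof.toLatex();             // -> the same trace as a LaTeX proof environment
```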
+
 // ============================================================================
 // REDUCTION SYSTEM
 // ============================================================================
@@ -652,6 +867,172 @@ function demonstrateStrongNormalization(term, reducer = null) {
   };
 }
 
+// ============================================================================
+// ROUTE SET STATISTICS (D(P) analysis from ncpsc.pdf §3)
+// ============================================================================
+
+/**
+ * RouteStatistics - Analyzes the set D(P) of valid fusion routes
+ * From ncpsc.pdf Definition 2: D(P) = {{p,q,r} : p,q,r distinct odd primes, p+q+r = P}
+ */
+class RouteStatistics {
+  constructor() {
+    this.routeCache = new Map();
+    this.primeOccurrence = new Map();
+  }
+
+  /**
+   * Get D(P) - all valid triads for prime P
+   */
+  getRouteSet(P) {
+    if (this.routeCache.has(P)) {
+      return this.routeCache.get(P);
+    }
+
+    const routes = FusionTerm.findTriads(P);
+    this.routeCache.set(P, routes);
+    return routes;
+  }
+
+  /**
+   * Compute |D(P)| - number of valid routes for P
+   */
+  routeCount(P) {
+    return this.getRouteSet(P).length;
+  }
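getRouteSet delegates to FusionTerm.findTriads, so D(P) itself is computed elsewhere. For intuition, here is a brute-force rendering of Definition 2 with two worked values, independent of the package's internals (the { p, q, r } output shape is assumed to match findTriads):

```js
// Brute-force D(P) per Definition 2: unordered triads of distinct odd primes
// summing to P, enumerated with p < q < r to avoid duplicates.
function triads(P) {
  const isPrime = n => { for (let d = 2; d * d <= n; d++) if (n % d === 0) return false; return n > 1; };
  const out = [];
  for (let p = 3; p < P; p += 2)
    for (let q = p + 2; q < P - p; q += 2) {
      const r = P - p - q;
      if (r > q && isPrime(p) && isPrime(q) && isPrime(r)) out.push({ p, q, r });
    }
  return out;
}

console.log(triads(19)); // [ { p: 3, q: 5, r: 11 } ]                      |D(19)| = 1
console.log(triads(23)); // [ { p: 3, q: 7, r: 13 }, { p: 5, q: 7, r: 11 } ] |D(23)| = 2
```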
+
+  /**
+   * Analyze core seed coverage
+   * Which small primes appear most frequently in valid triads?
+   */
+  analyzeCoreSeeds(maxPrime = 200) {
+    const occurrence = new Map();
+    const cooccurrence = new Map();
+
+    // Analyze all fusion-reachable primes
+    for (let P = 11; P <= maxPrime; P++) {
+      if (!isPrime(P)) continue;
+
+      const routes = this.getRouteSet(P);
+      if (routes.length === 0) continue;
+
+      for (const { p, q, r } of routes) {
+        // Count single occurrences
+        for (const prime of [p, q, r]) {
+          occurrence.set(prime, (occurrence.get(prime) || 0) + 1);
+        }
+
+        // Count co-occurrences (which pairs appear together)
+        const pairs = [[p,q], [p,r], [q,r]];
+        for (const [a, b] of pairs) {
+          const key = `${Math.min(a,b)},${Math.max(a,b)}`;
+          cooccurrence.set(key, (cooccurrence.get(key) || 0) + 1);
+        }
+      }
+    }
+
+    // Sort by occurrence count
+    const sortedOccurrence = Array.from(occurrence.entries())
+      .sort((a, b) => b[1] - a[1]);
+
+    const sortedCooccurrence = Array.from(cooccurrence.entries())
+      .sort((a, b) => b[1] - a[1]);
+
+    return {
+      coreSeeds: sortedOccurrence.slice(0, 10).map(([prime, count]) => ({ prime, count })),
+      frequentPairs: sortedCooccurrence.slice(0, 10).map(([pair, count]) => {
+        const [a, b] = pair.split(',').map(Number);
+        return { pair: [a, b], count };
+      }),
+      totalRoutes: Array.from(this.routeCache.values()).reduce((sum, routes) => sum + routes.length, 0),
+      uniquePrimesInRoutes: occurrence.size
+    };
+  }
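One detail worth noting: the co-occurrence map keys unordered pairs by sorting the two primes into a canonical string, so {3,7} and {7,3} count as the same pair. The same trick in isolation:

```js
// Canonical key for an unordered pair, as used by analyzeCoreSeeds above.
const pairKey = (a, b) => `${Math.min(a, b)},${Math.max(a, b)}`;
console.log(pairKey(7, 3) === pairKey(3, 7)); // true — {3,7} is counted once
```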
+
+  /**
+   * Compute route density - primes with most fusion routes
+   */
+  routeDensityRanking(minPrime = 11, maxPrime = 200) {
+    const density = [];
+
+    for (let P = minPrime; P <= maxPrime; P++) {
+      if (!isPrime(P)) continue;
+
+      const count = this.routeCount(P);
+      if (count > 0) {
+        density.push({
+          prime: P,
+          routeCount: count,
+          density: count / Math.log(P) // Normalize by log for comparison
+        });
+      }
+    }
+
+    // Sort by density
+    density.sort((a, b) => b.density - a.density);
+
+    return density;
+  }
+
+  /**
+   * Find primes with no valid fusion routes
+   * These must be generated only via chain application
+   */
+  findUnfusiblePrimes(minPrime = 11, maxPrime = 200) {
+    const unfusible = [];
+
+    for (let P = minPrime; P <= maxPrime; P++) {
+      if (!isPrime(P)) continue;
+
+      if (this.routeCount(P) === 0) {
+        unfusible.push(P);
+      }
+    }
+
+    return unfusible;
+  }
+
+  /**
+   * Analyze 108° closure for all routes
+   * From mtspbc.pdf: resonant triads have T(d) ≈ 108k°
+   */
+  analyze108Closure(maxPrime = 200) {
+    const closedTriads = [];
+    const twistAngle = p => 360 / p;
+
+    for (let P = 11; P <= maxPrime; P++) {
+      if (!isPrime(P)) continue;
+
+      const routes = this.getRouteSet(P);
+
+      for (const { p, q, r } of routes) {
+        const T = twistAngle(p) + twistAngle(q) + twistAngle(r);
+        const k = Math.round(T / 108);
+        const delta = Math.abs(T - 108 * k);
+
+        if (delta < 5) { // Within 5° of 108k
+          closedTriads.push({
+            p, q, r,
+            target: P,
+            totalTwist: T,
+            closestMultiple: k,
+            delta108: delta
+          });
+        }
+      }
+    }
+
+    // Sort by delta (best closures first)
+    closedTriads.sort((a, b) => a.delta108 - b.delta108);
+
+    return {
+      closedTriads: closedTriads.slice(0, 20), // Top 20
+      totalClosed: closedTriads.length,
+      perfectClosures: closedTriads.filter(t => t.delta108 < 1).length
+    };
+  }
+}
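analyze108Closure scores each route by how close its total twist T = 360/p + 360/q + 360/r lands to a multiple of 108°. One worked instance of the arithmetic, for the route {5, 13, 29} ∈ D(47):

```js
// T = 360/5 + 360/13 + 360/29 ≈ 72 + 27.69 + 12.41 = 112.11°;
// k = round(112.11 / 108) = 1, delta ≈ 4.11° < 5°, so the triad counts as
// closed (but not "perfect", which requires delta < 1°).
const T = 360 / 5 + 360 / 13 + 360 / 29;
const k = Math.round(T / 108);
console.log(T.toFixed(2), Math.abs(T - 108 * k).toFixed(2)); // "112.11" "4.11"
```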
+
 // ============================================================================
 // CONFLUENCE CHECK (Theorem 2)
 // ============================================================================
@@ -728,6 +1109,8 @@ module.exports = {
   isNormalForm,
   isReducible,
   termSize,
+  termDepth,
+  extractPrimes,
 
   // Canonicalization
   FusionCanonicalizer,
@@ -735,6 +1118,13 @@ module.exports = {
   // Verification
   NormalFormVerifier,
 
+  // Formal Proofs (from ncpsc.pdf §5)
+  ProofTrace,
+  ProofGenerator,
+
+  // Route Statistics (from ncpsc.pdf §3)
+  RouteStatistics,
+
   // Proofs
   demonstrateStrongNormalization,
   testLocalConfluence
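The final two hunks make all of the above public. A hypothetical end-to-end sketch; the require path is assumed from the package layout shown in the file list above, and the expected |D(23)| follows from Definition 2:

```js
// Path assumed from package/core/reduction.js in the file list above.
const { RouteStatistics, termDepth, extractPrimes } =
  require('@aleph-ai/tinyaleph/core/reduction');

const stats = new RouteStatistics();
console.log(stats.routeCount(23));               // |D(23)| — 2 per Definition 2: {3,7,13}, {5,7,11}
console.log(stats.findUnfusiblePrimes(11, 100)); // primes with no fusion route (chain-only)
console.log(stats.analyze108Closure(100).perfectClosures); // routes within 1° of 108k°
```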