@aleph-ai/tinyaleph 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +278 -0
  3. package/backends/cryptographic/index.js +196 -0
  4. package/backends/index.js +15 -0
  5. package/backends/interface.js +89 -0
  6. package/backends/scientific/index.js +272 -0
  7. package/backends/semantic/index.js +527 -0
  8. package/backends/semantic/surface.js +393 -0
  9. package/backends/semantic/two-layer.js +375 -0
  10. package/core/fano.js +127 -0
  11. package/core/hilbert.js +564 -0
  12. package/core/hypercomplex.js +141 -0
  13. package/core/index.js +133 -0
  14. package/core/llm.js +132 -0
  15. package/core/prime.js +184 -0
  16. package/core/resonance.js +695 -0
  17. package/core/rformer-tf.js +1086 -0
  18. package/core/rformer.js +806 -0
  19. package/core/sieve.js +350 -0
  20. package/data.json +8163 -0
  21. package/docs/EXAMPLES_PLAN.md +293 -0
  22. package/docs/README.md +159 -0
  23. package/docs/design/ALEPH_CHAT_ARCHITECTURE.md +499 -0
  24. package/docs/guide/01-quickstart.md +298 -0
  25. package/docs/guide/02-semantic-computing.md +409 -0
  26. package/docs/guide/03-cryptographic.md +420 -0
  27. package/docs/guide/04-scientific.md +494 -0
  28. package/docs/guide/05-llm-integration.md +568 -0
  29. package/docs/guide/06-advanced.md +996 -0
  30. package/docs/guide/README.md +188 -0
  31. package/docs/reference/01-core.md +695 -0
  32. package/docs/reference/02-physics.md +601 -0
  33. package/docs/reference/03-backends.md +892 -0
  34. package/docs/reference/04-engine.md +632 -0
  35. package/docs/reference/README.md +252 -0
  36. package/docs/theory/01-prime-semantics.md +327 -0
  37. package/docs/theory/02-hypercomplex-algebra.md +421 -0
  38. package/docs/theory/03-phase-synchronization.md +364 -0
  39. package/docs/theory/04-entropy-reasoning.md +348 -0
  40. package/docs/theory/05-non-commutativity.md +402 -0
  41. package/docs/theory/06-two-layer-meaning.md +414 -0
  42. package/docs/theory/07-resonant-field-interface.md +419 -0
  43. package/docs/theory/08-semantic-sieve.md +520 -0
  44. package/docs/theory/09-temporal-emergence.md +298 -0
  45. package/docs/theory/10-quaternionic-memory.md +415 -0
  46. package/docs/theory/README.md +162 -0
  47. package/engine/aleph.js +418 -0
  48. package/engine/index.js +7 -0
  49. package/index.js +23 -0
  50. package/modular.js +254 -0
  51. package/package.json +99 -0
  52. package/physics/collapse.js +95 -0
  53. package/physics/entropy.js +88 -0
  54. package/physics/index.js +65 -0
  55. package/physics/kuramoto.js +91 -0
  56. package/physics/lyapunov.js +80 -0
  57. package/physics/oscillator.js +95 -0
  58. package/types/index.d.ts +575 -0
@@ -0,0 +1,568 @@
1
+ # LLM Integration
2
+
3
+ This guide covers coupling Aleph with Large Language Models for enhanced reasoning, entropy minimization, and bidirectional semantic grounding.
4
+
5
+ ## Overview
6
+
7
+ Aleph provides a **Resonant Field Interface (RFI)** for LLM integration. The key insight: LLMs generate tokens probabilistically, while Aleph provides deterministic semantic anchors. Coupling these systems enables:
8
+
9
+ - **Entropy-bounded generation**: Prevent hallucination by constraining output entropy
10
+ - **Semantic validation**: Verify LLM outputs against prime-semantic ground truth
11
+ - **Concept grounding**: Translate between natural language and formal meaning
12
+ - **Coherent reasoning**: Guide multi-step inference through entropy minimization
13
+
14
+ ---
15
+
16
+ ## Architecture
17
+
18
+ ```
19
+ ┌─────────────────────────────────────────────────────────────────┐
20
+ │ Coupled System │
21
+ │ │
22
+ │ ┌─────────────┐ Resonant Field ┌─────────────────┐ │
23
+ │ │ │ ←─────────────────────→ │ │ │
24
+ │ │ LLM │ (Bidirectional) │ Aleph │ │
25
+ │ │ │ │ │ │
26
+ │ └──────┬──────┘ └────────┬────────┘ │
27
+ │ │ │ │
28
+ │ ↓ ↓ │
29
+ │ Token Stream Prime Field │
30
+ │ (Probabilistic) (Deterministic) │
31
+ │ │
32
+ │ ┌─────────────────────────────────────────────────────────┐ │
33
+ │ │ Coupling Layer │ │
34
+ │ │ │ │
35
+ │ │ • Entropy Monitor • Semantic Validator │ │
36
+ │ │ • Field Projection • Collapse Trigger │ │
37
+ │ │ • Token→Prime Map • Prime→Token Generation │ │
38
+ │ └─────────────────────────────────────────────────────────┘ │
39
+ └─────────────────────────────────────────────────────────────────┘
40
+ ```
41
+
42
+ ---
43
+
44
+ ## Basic Coupling
45
+
46
+ ### Creating the Coupled System
47
+
48
+ ```javascript
49
+ const { createEngine } = require('./modular');
50
+ const { LLMCoupling } = require('./core/llm');
51
+
52
+ // Create Aleph engine
53
+ const config = require('./data.json');
54
+ const engine = createEngine('semantic', config);
55
+
56
+ // Create coupling layer
57
+ const coupling = new LLMCoupling(engine, {
58
+ entropyThreshold: 0.3, // Max allowed entropy
59
+ coherenceThreshold: 0.7, // Min required coherence
60
+ collapseRate: 0.8 // Collapse strength
61
+ });
62
+
63
+ // Connect to LLM (example with OpenAI-compatible API)
64
+ coupling.connectLLM({
65
+ endpoint: 'http://localhost:8080/v1/chat/completions',
66
+ model: 'local-model'
67
+ });
68
+ ```
69
+
70
+ ### Simple Query Processing
71
+
72
+ ```javascript
73
+ // Process a query through the coupled system
74
// Run a query end-to-end through the coupled system, falling back to a
// constrained re-query when the free-form LLM answer is not coherent
// with the query's prime field.
async function processQuery(query) {
  // Project the query into the prime field for later validation.
  const queryField = coupling.encodeToField(query);

  // Let the LLM answer freely first.
  const llmResponse = await coupling.queryLLM(query);

  // Judge the free-form answer against the deterministic field.
  const { coherent } = coupling.validateResponse(llmResponse, queryField);
  if (coherent) {
    return llmResponse;
  }

  // Incoherent: ask again with the field as a hard constraint.
  return coupling.constrainedQuery(query, queryField);
}
91
+
92
+ const result = await processQuery('What is the relationship between truth and wisdom?');
93
+ console.log(result);
94
+ ```
95
+
96
+ ---
97
+
98
+ ## Entropy-Bounded Generation
99
+
100
+ ### Monitoring Token Entropy
101
+
102
+ ```javascript
103
+ // Track entropy during generation
104
/**
 * Tracks per-token entropy during generation and flags tokens whose
 * field entropy exceeds a fixed threshold.
 */
class EntropyMonitor {
  constructor(coupling, threshold) {
    this.coupling = coupling;
    this.threshold = threshold;
    this.history = [];
  }

  /**
   * Score one token: record it in history and reject it when its
   * semantic-field entropy is above the threshold.
   * (`context` is accepted for interface compatibility; it is unused.)
   */
  processToken(token, context) {
    const tokenField = this.coupling.encodeToField(token);
    const entropy = tokenField.entropy();
    this.history.push({ token, entropy });

    return entropy > this.threshold
      ? { accept: false, reason: 'entropy_exceeded', entropy }
      : { accept: true, entropy };
  }

  /** Mean entropy over everything seen so far (0 when nothing seen). */
  getAverageEntropy() {
    const { history } = this;
    if (history.length === 0) return 0;
    let total = 0;
    for (const { entropy } of history) total += entropy;
    return total / history.length;
  }
}
135
+
136
+ const monitor = new EntropyMonitor(coupling, 0.5);
137
+ ```
138
+
139
+ ### Constrained Generation
140
+
141
+ ```javascript
142
+ // Generate with entropy constraints
143
/**
 * Generate text token-by-token, accepting only candidate tokens whose
 * semantic-field entropy is at or below `maxEntropy`.
 *
 * Fix: the original looped forever when no candidate satisfied the
 * entropy bound — nothing was appended and the context never advanced,
 * so neither stop condition could ever trigger. Generation now halts
 * when no candidate can make progress.
 *
 * @param {string} prompt - seed text for generation
 * @param {object} coupling - LLMCoupling providing candidates and field encoding
 * @param {number} maxEntropy - per-token entropy ceiling
 * @returns {Promise<string>} accepted tokens joined with spaces
 */
async function constrainedGenerate(prompt, coupling, maxEntropy) {
  const tokens = [];
  let context = prompt;

  while (true) {
    // Get next token candidates from the LLM.
    const candidates = await coupling.getTokenCandidates(context, 10);

    // Score every candidate by the entropy of its prime-field encoding.
    const scored = candidates.map(token => ({
      token,
      entropy: coupling.encodeToField(token).entropy()
    }));

    // Prefer the lowest-entropy candidate.
    scored.sort((a, b) => a.entropy - b.entropy);

    let accepted = false;
    for (const candidate of scored) {
      if (candidate.entropy <= maxEntropy) {
        tokens.push(candidate.token);
        context += candidate.token;
        accepted = true;
        break;
      }
    }

    // No candidate met the bound: generation cannot make progress.
    if (!accepted) break;

    // Stop on explicit end marker or length cap.
    if (tokens[tokens.length - 1] === '</end>') break;
    if (tokens.length > 100) break;
  }

  return tokens.join(' ');
}
175
+ ```
176
+
177
+ ---
178
+
179
+ ## Semantic Validation
180
+
181
+ ### Coherence Checking
182
+
183
+ ```javascript
184
+ // Validate LLM output against semantic expectations
185
/**
 * Validate an LLM response by measuring its average field coherence
 * against a list of expected concepts.
 *
 * Fix: an empty `expectedConcepts` list previously divided by zero and
 * returned `avgCoherence: NaN` (and therefore `valid: false`). With no
 * expectations the response is now treated as vacuously valid.
 *
 * @param {string} response - LLM output to check
 * @param {string[]} expectedConcepts - concepts the response should cohere with
 * @param {object} coupling - LLMCoupling providing encoding and the threshold
 * @returns {{valid: boolean, avgCoherence: number, scores: Array}}
 */
function validateCoherence(response, expectedConcepts, coupling) {
  // No expectations -> vacuously valid (avoids the NaN division).
  if (expectedConcepts.length === 0) {
    return { valid: true, avgCoherence: 1, scores: [] };
  }

  const responseField = coupling.encodeToField(response);

  const coherenceScores = expectedConcepts.map(concept => ({
    concept,
    coherence: responseField.coherence(coupling.encodeToField(concept))
  }));

  const avgCoherence =
    coherenceScores.reduce((s, c) => s + c.coherence, 0) / coherenceScores.length;

  return {
    valid: avgCoherence >= coupling.coherenceThreshold,
    avgCoherence,
    scores: coherenceScores
  };
}
202
+
203
+ // Example usage
204
+ const response = await coupling.queryLLM('Define wisdom');
205
+ const validation = validateCoherence(
206
+ response,
207
+ ['knowledge', 'understanding', 'experience'],
208
+ coupling
209
+ );
210
+
211
+ console.log('Coherence check:', validation);
212
+ ```
213
+
214
+ ### Contradiction Detection
215
+
216
+ ```javascript
217
+ // Check for internal contradictions in response
218
/**
 * Scan a response for internal contradictions by comparing every pair of
 * sentences in the prime field.
 *
 * Improvement: each sentence is now encoded once up front instead of
 * once per pair — the original called `encodeToField` inside the inner
 * loop, doing O(n^2) encodings for n sentences.
 *
 * @param {string} response - text to scan (split on . ! ?)
 * @param {object} coupling - LLMCoupling providing `encodeToField`
 * @returns {Array<{sentence1: string, sentence2: string, type: string, coherence?: number}>}
 */
function detectContradictions(response, coupling) {
  const sentences = response.split(/[.!?]+/).filter(s => s.trim());
  // Encode each sentence exactly once.
  const fields = sentences.map(s => coupling.encodeToField(s));
  const contradictions = [];

  for (let i = 0; i < sentences.length; i++) {
    for (let j = i + 1; j < sentences.length; j++) {
      const field1 = fields[i];
      const field2 = fields[j];

      // Zero-divisor product = hard logical contradiction.
      if (field1.isZeroDivisorWith(field2)) {
        contradictions.push({
          sentence1: sentences[i],
          sentence2: sentences[j],
          type: 'zero-divisor'
        });
      }

      // Very low coherence = semantic opposition.
      const coherence = field1.coherence(field2);
      if (coherence < 0.1) {
        contradictions.push({
          sentence1: sentences[i],
          sentence2: sentences[j],
          type: 'low-coherence',
          coherence
        });
      }
    }
  }

  return contradictions;
}
251
+ ```
252
+
253
+ ---
254
+
255
+ ## Guided Reasoning
256
+
257
+ ### Step-by-Step Reasoning
258
+
259
+ ```javascript
260
+ // Guide LLM through reasoning steps with entropy minimization
261
/**
 * Drive the LLM through reasoning steps, accepting only steps that lower
 * the entropy of the current understanding, until entropy converges
 * below 0.1 or the budgets run out.
 *
 * Fix: the original could loop forever when the LLM repeatedly failed to
 * reduce entropy — no step was recorded, so the `steps.length > 10`
 * guard never fired. A total attempt budget now guarantees termination.
 *
 * @param {string} question - question to reason about
 * @param {object} coupling - LLMCoupling (queryLLM / constrainedQuery / encodeToField)
 * @returns {Promise<{question: string, steps: Array, finalEntropy: number, converged: boolean}>}
 */
async function guidedReasoning(question, coupling) {
  const MAX_STEPS = 10;     // accepted-step budget (matches original guard)
  const MAX_ATTEMPTS = 30;  // hard bound on total LLM round-trips

  const steps = [];
  let currentField = coupling.encodeToField(question);
  let currentEntropy = currentField.entropy();
  let attempts = 0;

  while (currentEntropy > 0.1 && steps.length <= MAX_STEPS && attempts < MAX_ATTEMPTS) {
    attempts++;

    // Ask LLM for the next reasoning step.
    const stepPrompt = `
Current understanding: ${steps.map(s => s.text).join(' ')}
Question: ${question}
What is the next logical step toward an answer?
`;

    const stepResponse = await coupling.queryLLM(stepPrompt);
    const stepField = coupling.encodeToField(stepResponse);
    const newEntropy = stepField.entropy();

    if (newEntropy < currentEntropy) {
      // Entropy dropped: accept the step as-is.
      steps.push({
        text: stepResponse,
        entropy: newEntropy,
        drop: currentEntropy - newEntropy
      });
      currentField = stepField;
      currentEntropy = newEntropy;
    } else {
      // Entropy rose: retry with the current field as a constraint.
      const constrained = await coupling.constrainedQuery(stepPrompt, currentField);
      const constrainedField = coupling.encodeToField(constrained);
      const constrainedEntropy = constrainedField.entropy();

      if (constrainedEntropy < currentEntropy) {
        steps.push({
          text: constrained,
          entropy: constrainedEntropy,
          drop: currentEntropy - constrainedEntropy
        });
        currentField = constrainedField;
        currentEntropy = constrainedEntropy;
      }
      // Otherwise no progress this attempt; the attempt counter still
      // guarantees termination.
    }
  }

  return {
    question,
    steps,
    finalEntropy: currentEntropy,
    converged: currentEntropy <= 0.1
  };
}
316
+ ```
317
+
318
+ ### Concept Navigation
319
+
320
+ ```javascript
321
+ // Navigate concept space with LLM guidance
322
/**
 * Walk concept space from `startConcept` toward `targetConcept` by
 * asking the LLM for bridge concepts, keeping only bridges that raise
 * coherence with the target and backtracking on dead ends.
 *
 * Fix: when every proposed bridge failed while the path was already at
 * its start, the original neither advanced nor backtracked and looped
 * forever (the `path.length > 20` guard could never trigger). A total
 * attempt budget now guarantees termination.
 *
 * @param {string} startConcept - where the walk begins
 * @param {string} targetConcept - where the walk should end
 * @param {object} coupling - LLMCoupling (queryLLM / encodeToField)
 * @returns {Promise<{start: string, target: string, path: string[], finalCoherence: number}>}
 */
async function navigateConcepts(startConcept, targetConcept, coupling) {
  const MAX_PATH = 20;      // path-length cap (matches original guard)
  const MAX_ATTEMPTS = 50;  // hard bound on total LLM round-trips

  const path = [startConcept];
  let current = coupling.encodeToField(startConcept);
  const target = coupling.encodeToField(targetConcept);

  for (let attempts = 0;
       current.coherence(target) < 0.9 && attempts < MAX_ATTEMPTS;
       attempts++) {
    // Ask LLM for a bridge concept.
    const prompt = `
Starting concept: ${path[0]}
Current position: ${path[path.length - 1]}
Target concept: ${targetConcept}

What is an intermediate concept that connects ${path[path.length - 1]} closer to ${targetConcept}?
`;

    const bridge = await coupling.queryLLM(prompt);
    const bridgeField = coupling.encodeToField(bridge);

    if (bridgeField.coherence(target) > current.coherence(target)) {
      // Progress: extend the path.
      path.push(bridge);
      current = bridgeField;
    } else if (path.length > 1) {
      // Dead end: backtrack one step.
      path.pop();
      current = coupling.encodeToField(path[path.length - 1]);
    }
    // else: stuck at the start — the attempt budget bounds this.

    if (path.length > MAX_PATH) break;
  }

  return {
    start: startConcept,
    target: targetConcept,
    path,
    finalCoherence: current.coherence(target)
  };
}
362
+ ```
363
+
364
+ ---
365
+
366
+ ## Bidirectional Translation
367
+
368
+ ### Natural Language to Primes
369
+
370
+ ```javascript
371
+ // Convert LLM's natural language to prime encoding
372
// Convert natural language to a prime encoding, using the LLM to define
// any vocabulary Aleph's tokenizer does not already know before the
// final encode.
async function naturalToPrimes(text, coupling) {
  const backend = coupling.engine.backend;

  // Tokenize with Aleph's own tokenizer first.
  const tokens = backend.tokenize(text);

  // Teach the backend every unknown word, one at a time.
  for (const token of tokens.filter(t => !t.known)) {
    const definitionPrompt = `
Define "${token.word}" using only these fundamental concepts:
${backend.getOntologyTerms().join(', ')}

Be concise, use only 2-3 terms.
`;

    const definition = await coupling.queryLLM(definitionPrompt);
    const defPrimes = backend.encode(definition);

    // Register the new word so the final encode can use it.
    backend.learn(token.word, defPrimes);
  }

  // Encode with the (possibly extended) vocabulary.
  return backend.encode(text);
}
397
+ ```
398
+
399
+ ### Primes to Natural Language
400
+
401
+ ```javascript
402
+ // Generate natural language from prime encoding
403
// Render a prime encoding as natural language in the requested style,
// then verify that the LLM's elaboration preserved the original
// semantics (coherence above 0.8).
async function primesToNatural(primes, style, coupling) {
  const backend = coupling.engine.backend;

  // Deterministic base decoding of the primes.
  const baseText = backend.decode(primes);

  // Ask the LLM to elaborate the raw concept list.
  const prompt = `
Given these core concepts: ${baseText}

Generate a ${style} explanation that captures this meaning.
Keep the semantic content intact but make it natural and flowing.
`;

  const elaborated = await coupling.queryLLM(prompt);

  // Measure how much meaning survived the elaboration.
  const originalField = backend.primesToState(primes);
  const elaboratedField = coupling.encodeToField(elaborated);
  const coherence = originalField.coherence(elaboratedField);

  return {
    text: elaborated,
    coherence,
    preserved: coherence > 0.8
  };
}
429
+ ```
430
+
431
+ ---
432
+
433
+ ## Practical Patterns
434
+
435
+ ### Question Answering with Validation
436
+
437
+ ```javascript
438
// Answer a question via the LLM and score the answer for reliability:
// relevance to the question, response entropy, and internal
// contradictions (via detectContradictions).
async function validateQA(question, coupling) {
  const answer = await coupling.queryLLM(question);

  // Encode question and answer into the prime field.
  const questionField = coupling.encodeToField(question);
  const answerField = coupling.encodeToField(answer);

  const relevance = questionField.coherence(answerField);
  const answerEntropy = answerField.entropy();

  // Heuristic hallucination signals.
  const hallucination = {
    highEntropy: answerEntropy > 0.8,
    lowRelevance: relevance < 0.3,
    contradictions: detectContradictions(answer, coupling)
  };

  const isReliable =
    !hallucination.highEntropy &&
    !hallucination.lowRelevance &&
    hallucination.contradictions.length === 0;

  return {
    question,
    answer,
    relevance,
    entropy: answerEntropy,
    reliable: isReliable,
    hallucination
  };
}
472
+ ```
473
+
474
+ ### Iterative Refinement
475
+
476
+ ```javascript
477
// Repeatedly ask the LLM to refine its own response, keeping a revision
// only when it lowers entropy, until entropy drops below 0.2 or the
// iteration budget is exhausted. Records every iteration (including
// rejected ones, which re-record the kept response).
async function iterativeRefinement(task, maxIterations, coupling) {
  let current = await coupling.queryLLM(task);
  let currentField = coupling.encodeToField(current);
  let currentEntropy = currentField.entropy();

  const iterations = [{ text: current, entropy: currentEntropy, iteration: 0 }];

  for (let i = 1; i <= maxIterations; i++) {
    if (currentEntropy < 0.2) break; // already good enough

    const refinePrompt = `
Original task: ${task}
Current response: ${current}

This response has high uncertainty. Please refine it to be:
- More specific
- More coherent
- More focused

Refined response:
`;

    const refined = await coupling.queryLLM(refinePrompt);
    const refinedField = coupling.encodeToField(refined);
    const refinedEntropy = refinedField.entropy();

    // Keep the refinement only if it actually reduced uncertainty.
    if (refinedEntropy < currentEntropy) {
      current = refined;
      currentField = refinedField;
      currentEntropy = refinedEntropy;
    }

    iterations.push({ text: current, entropy: currentEntropy, iteration: i });
  }

  return {
    task,
    final: current,
    finalEntropy: currentEntropy,
    iterations
  };
}
527
+ ```
528
+
529
+ ---
530
+
531
+ ## Configuration Options
532
+
533
+ ```javascript
534
+ const coupling = new LLMCoupling(engine, {
535
+ // Entropy control
536
+ entropyThreshold: 0.3, // Max allowed response entropy
537
+ entropyWarning: 0.5, // Entropy level for warnings
538
+
539
+ // Coherence settings
540
+ coherenceThreshold: 0.7, // Min coherence with query
541
+ semanticValidation: true, // Enable semantic checking
542
+
543
+ // Collapse behavior
544
+ collapseRate: 0.8, // How aggressively to collapse states
545
+ collapseOnHighEntropy: true, // Auto-collapse on entropy spike
546
+
547
+ // Generation control
548
+ maxTokens: 500, // Max tokens per response
549
+ temperature: 0.7, // LLM temperature
550
+ topP: 0.9, // Nucleus sampling
551
+
552
+ // Caching
553
+ cacheResponses: true, // Cache LLM responses
554
+ cacheSemanticFields: true, // Cache semantic field computations
555
+
556
+ // Debugging
557
+ logLevel: 'info', // 'debug', 'info', 'warn', 'error'
558
+ trackMetrics: true // Record performance metrics
559
+ });
560
+ ```
561
+
562
+ ---
563
+
564
+ ## Next Steps
565
+
566
+ - [Advanced Topics →](./06-advanced.md)
567
+ - [Theory: Resonant Field Interface →](../theory/07-resonant-field-interface.md)
568
+ - [Reference: LLMCoupling →](../reference/04-engine.md#llm-coupling)