@aleph-ai/tinyaleph 1.5.7 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/core/alexander-module.js +1469 -0
- package/core/arithmetic-link-kernel.js +1338 -0
- package/core/emotion.js +565 -0
- package/core/gravity.js +714 -0
- package/core/hilbert.js +506 -3
- package/core/index.js +132 -4
- package/core/nonlocal.js +744 -0
- package/core/oracle.js +662 -0
- package/examples/01-hello-world.js +69 -0
- package/examples/02-basic-hash.js +90 -0
- package/examples/02-observer-stack.js +385 -0
- package/examples/03-quantum-coin.js +136 -0
- package/examples/05-symbolic-resonance.js +146 -0
- package/examples/06-symbol-database.js +150 -0
- package/examples/07-semantic-inference.js +223 -0
- package/examples/08-compound-symbols.js +219 -0
- package/examples/README.md +170 -0
- package/examples/ai/01-embeddings.js +155 -0
- package/examples/ai/02-semantic-memory.js +243 -0
- package/examples/ai/03-reasoning.js +243 -0
- package/examples/ai/04-knowledge-graph.js +279 -0
- package/examples/ai/05-llm-integration.js +333 -0
- package/examples/ai/06-agent.js +294 -0
- package/examples/ai/07-hybrid-ai.js +223 -0
- package/examples/ai/08-entropy-reasoning.js +259 -0
- package/examples/ai/09-concept-learning.js +271 -0
- package/examples/ai/10-prompt-primes.js +312 -0
- package/examples/ai/11-rag.js +332 -0
- package/examples/ai/12-neuro-symbolic.js +321 -0
- package/examples/ai/README.md +80 -0
- package/examples/arithmetic-topology/01-legendre-symbol.js +78 -0
- package/examples/arithmetic-topology/02-redei-symbol.js +126 -0
- package/examples/arithmetic-topology/03-alk-kuramoto.js +138 -0
- package/examples/arithmetic-topology/04-alexander-module.js +117 -0
- package/examples/arithmetic-topology/05-signature-memory.js +118 -0
- package/examples/arithmetic-topology/README.md +291 -0
- package/examples/bioinformatics/01-dna-encoding.js +108 -0
- package/examples/bioinformatics/02-central-dogma.js +162 -0
- package/examples/bioinformatics/03-protein-folding.js +206 -0
- package/examples/bioinformatics/04-dna-computing.js +192 -0
- package/examples/bioinformatics/05-molecular-binding.js +209 -0
- package/examples/book-operators-demo.js +155 -0
- package/examples/chat.js +105 -0
- package/examples/crt-homology/01-residue-encoding.js +87 -0
- package/examples/crt-homology/02-birkhoff-attention.js +100 -0
- package/examples/crt-homology/03-homology-loss.js +132 -0
- package/examples/crt-homology/04-crt-resoformer.js +132 -0
- package/examples/crt-homology/README.md +67 -0
- package/examples/crypto/01-password-hash.js +210 -0
- package/examples/crypto/02-key-derivation.js +210 -0
- package/examples/crypto/03-hmac.js +229 -0
- package/examples/crypto/04-file-integrity.js +263 -0
- package/examples/crypto/05-content-hash.js +263 -0
- package/examples/crypto/README.md +99 -0
- package/examples/demo-modular.js +223 -0
- package/examples/demo-two-layer.js +196 -0
- package/examples/discrete/01-integer-sine-table.js +120 -0
- package/examples/discrete/02-codebook-tunneling.js +118 -0
- package/examples/discrete/03-canonical-fusion.js +135 -0
- package/examples/discrete/04-tick-gate.js +139 -0
- package/examples/discrete/README.md +142 -0
- package/examples/emotion-demo.js +200 -0
- package/examples/formal-semantics/01-typed-terms.js +156 -0
- package/examples/formal-semantics/02-reduction.js +202 -0
- package/examples/formal-semantics/03-lambda-translation.js +206 -0
- package/examples/formal-semantics/04-enochian-language.js +257 -0
- package/examples/formal-semantics/README.md +98 -0
- package/examples/gravity-demo.js +190 -0
- package/examples/math/01-quaternions.js +237 -0
- package/examples/math/02-octonions.js +192 -0
- package/examples/math/03-prime-factorization.js +215 -0
- package/examples/math/04-vector-spaces.js +210 -0
- package/examples/math/05-gaussian-primes.js +234 -0
- package/examples/math/README.md +93 -0
- package/examples/nonlocal-demo.js +237 -0
- package/examples/oracle-demo.js +204 -0
- package/examples/physics/01-oscillator.js +177 -0
- package/examples/physics/02-lyapunov.js +201 -0
- package/examples/physics/03-collapse.js +183 -0
- package/examples/physics/04-kuramoto.js +212 -0
- package/examples/physics/05-entropy.js +226 -0
- package/examples/physics/05-sync-models.js +298 -0
- package/examples/physics/06-primeon-ladder.js +233 -0
- package/examples/physics/07-kuramoto-coupled-ladder.js +298 -0
- package/examples/physics/README.md +126 -0
- package/examples/quantum/01-prime-hunter.js +79 -0
- package/examples/quantum/02-entanglement-demo.js +79 -0
- package/examples/quantum/03-wave-analysis.js +63 -0
- package/examples/resonance/01-prime-hilbert-space.js +140 -0
- package/examples/resonance/02-prime-resonance-network.js +221 -0
- package/examples/resonance/03-resoformer.js +349 -0
- package/examples/resonance/04-resoformer-training.js +329 -0
- package/examples/resonance/05-language-model.js +484 -0
- package/examples/resonance/README.md +238 -0
- package/examples/run-examples.js +427 -0
- package/examples/scientific/01-single-qubit.js +185 -0
- package/examples/scientific/02-two-qubit.js +209 -0
- package/examples/scientific/03-quantum-circuits.js +270 -0
- package/examples/scientific/04-measurement.js +229 -0
- package/examples/scientific/05-algorithms.js +245 -0
- package/examples/scientific/06-random.js +225 -0
- package/examples/scientific/07-wavefunction.js +192 -0
- package/examples/scientific/README.md +118 -0
- package/examples/semantic/01-vocabulary.js +186 -0
- package/examples/semantic/02-similarity.js +263 -0
- package/examples/semantic/03-word-algebra.js +295 -0
- package/examples/semantic/04-clustering.js +348 -0
- package/examples/semantic/05-classification.js +386 -0
- package/examples/semantic/06-dna-encoding.js +228 -0
- package/examples/semantic/07-search.js +304 -0
- package/examples/semantic/08-qa-system.js +278 -0
- package/examples/semantic/README.md +116 -0
- package/examples/topology/01-108-invariant.js +81 -0
- package/examples/topology/02-trefoil-constants.js +112 -0
- package/examples/topology/03-gauge-symmetry.js +112 -0
- package/examples/topology/04-free-energy-dynamics.js +124 -0
- package/examples/topology/README.md +129 -0
- package/index.js +32 -0
- package/modular.js +63 -2
- package/package.json +8 -3
- package/physics/alk-kuramoto.js +817 -0
- package/physics/index.js +23 -2
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @example Knowledge Graph
|
|
3
|
+
* @description Build and query a prime-based knowledge graph
|
|
4
|
+
*
|
|
5
|
+
* A knowledge graph stores entities and relationships. TinyAleph enhances
|
|
6
|
+
* this with prime-based embeddings that enable:
|
|
7
|
+
* - Semantic entity matching
|
|
8
|
+
* - Relationship inference
|
|
9
|
+
* - Path finding through meaning space
|
|
10
|
+
* - Multi-hop reasoning
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
const { SemanticBackend, Hypercomplex } = require('../../modular');
|
|
14
|
+
|
|
15
|
+
// ===========================================
// SETUP
// ===========================================

// Shared semantic backend for the whole example; dimension 16 matches the
// Hypercomplex embedding size used by the other examples in this directory.
const backend = new SemanticBackend({ dimension: 16 });
|
|
20
|
+
|
|
21
|
+
// ===========================================
|
|
22
|
+
// KNOWLEDGE GRAPH CLASS
|
|
23
|
+
// ===========================================
|
|
24
|
+
|
|
25
|
+
/**
 * A prime-embedding-backed knowledge graph: entities and directed relations
 * are stored alongside embeddings produced by the backend, enabling semantic
 * search, path finding, and multi-hop traversal.
 */
class KnowledgeGraph {
  /**
   * @param {object} backend - embedding provider; must expose
   *   textToOrderedState(text) returning a state with a numeric `.c` array.
   */
  constructor(backend) {
    this.backend = backend;
    this.entities = new Map();   // id -> { id, name, type, properties, embedding }
    this.relations = [];         // { fromId, toId, relation, properties, embedding }
    this.entityIndex = 0;        // monotonically increasing id counter
  }

  /**
   * Add an entity with properties and embed its textual description.
   * @returns {number} the new entity's id (ids start at 1)
   */
  addEntity(name, type, properties = {}) {
    const description = `${name} is a ${type}. ${Object.entries(properties).map(([k, v]) => `${k}: ${v}`).join('. ')}`;
    const id = ++this.entityIndex;

    this.entities.set(id, {
      id,
      name,
      type,
      properties,
      embedding: this.backend.textToOrderedState(description)
    });

    return id;
  }

  /**
   * Add a directed relationship between two existing entities.
   * @throws {Error} if either endpoint id is unknown
   */
  addRelation(fromId, relation, toId, properties = {}) {
    const from = this.entities.get(fromId);
    const to = this.entities.get(toId);
    if (!from || !to) throw new Error('Entity not found');

    const description = `${from.name} ${relation} ${to.name}`;

    this.relations.push({
      fromId,
      toId,
      relation,
      properties,
      embedding: this.backend.textToOrderedState(description)
    });
  }

  /**
   * Cosine similarity between two embeddings.
   * Returns 0 (instead of NaN) when either vector has zero magnitude, so
   * downstream similarity sorts stay well-ordered.
   */
  similarity(a, b) {
    let dot = 0, magA = 0, magB = 0;
    for (let i = 0; i < a.c.length; i++) {
      dot += a.c[i] * b.c[i];
      magA += a.c[i] * a.c[i];
      magB += b.c[i] * b.c[i];
    }
    // Guard: a zero vector has no direction; NaN here would corrupt rankings.
    if (magA === 0 || magB === 0) return 0;
    return dot / (Math.sqrt(magA) * Math.sqrt(magB));
  }

  /**
   * Find entities by semantic query.
   * @returns {Array<{entity: object, similarity: number}>} top-K by similarity
   */
  findEntities(query, topK = 5) {
    const queryEmbed = this.backend.textToOrderedState(query);
    const results = [];

    for (const entity of this.entities.values()) {
      const sim = this.similarity(queryEmbed, entity.embedding);
      results.push({ entity, similarity: sim });
    }

    results.sort((a, b) => b.similarity - a.similarity);
    return results.slice(0, topK);
  }

  /**
   * Relations touching an entity.
   * @param {'out'|'in'|'both'} direction - edge direction filter
   */
  getRelations(entityId, direction = 'both') {
    return this.relations.filter(r => {
      if (direction === 'out') return r.fromId === entityId;
      if (direction === 'in') return r.toId === entityId;
      return r.fromId === entityId || r.toId === entityId;
    });
  }

  /**
   * Semantic relation search over relation-description embeddings.
   * @returns {Array<{relation: object, similarity: number}>} top-K matches
   */
  findRelations(query, topK = 5) {
    const queryEmbed = this.backend.textToOrderedState(query);
    const results = [];

    for (const rel of this.relations) {
      const sim = this.similarity(queryEmbed, rel.embedding);
      results.push({ relation: rel, similarity: sim });
    }

    results.sort((a, b) => b.similarity - a.similarity);
    return results.slice(0, topK);
  }

  /**
   * Shortest path between entities via breadth-first search.
   * Only OUTGOING relations are followed, so the search is directed.
   * @returns {{path: number[], relations: object[]} | null}
   */
  findPath(fromId, toId, maxDepth = 4) {
    const queue = [{ path: [fromId], relations: [] }];
    const visited = new Set([fromId]);

    while (queue.length > 0) {
      const { path, relations } = queue.shift();
      const currentId = path[path.length - 1];

      if (currentId === toId) {
        return { path, relations };
      }

      if (path.length >= maxDepth) continue;

      const nextRels = this.getRelations(currentId, 'out');
      for (const rel of nextRels) {
        // direction 'out' guarantees rel.fromId === currentId, so the
        // neighbor is always rel.toId (the old ternary was dead code).
        const nextId = rel.toId;
        if (!visited.has(nextId)) {
          visited.add(nextId);
          queue.push({
            path: [...path, nextId],
            relations: [...relations, rel]
          });
        }
      }
    }

    return null; // No path found
  }

  /**
   * Multi-hop query (e.g., "friends of friends who like X").
   * @param {Array<{relation: string}>} hops - relation filters applied in order
   * @returns {object[]} entities reachable after all hops
   */
  multiHopQuery(startId, hops) {
    let currentSet = new Set([startId]);

    for (const hop of hops) {
      const nextSet = new Set();
      for (const entityId of currentSet) {
        const rels = this.getRelations(entityId, 'out');
        for (const rel of rels) {
          if (rel.relation === hop.relation) {
            nextSet.add(rel.toId);
          }
        }
      }
      currentSet = nextSet;
      if (currentSet.size === 0) break;
    }

    return Array.from(currentSet).map(id => this.entities.get(id));
  }
}
|
|
166
|
+
|
|
167
|
+
// ===========================================
// BUILD EXAMPLE GRAPH
// ===========================================

console.log('TinyAleph Knowledge Graph Example');
console.log('==================================\n');

const kg = new KnowledgeGraph(backend);

// Add people
console.log('Building knowledge graph...');
const alice = kg.addEntity('Alice', 'Person', { occupation: 'Engineer', city: 'San Francisco' });
const bob = kg.addEntity('Bob', 'Person', { occupation: 'Designer', city: 'New York' });
const carol = kg.addEntity('Carol', 'Person', { occupation: 'Scientist', city: 'Boston' });
const dave = kg.addEntity('Dave', 'Person', { occupation: 'Engineer', city: 'Seattle' });

// Add companies
const techCorp = kg.addEntity('TechCorp', 'Company', { industry: 'Technology', size: 'Large' });
const designStudio = kg.addEntity('DesignStudio', 'Company', { industry: 'Design', size: 'Small' });

// Add topics
const ai = kg.addEntity('Artificial Intelligence', 'Topic', { field: 'Computer Science' });
const ml = kg.addEntity('Machine Learning', 'Topic', { field: 'Computer Science' });

// Add relationships
// NOTE: relations are directed (from -> to); findPath follows them one way.
kg.addRelation(alice, 'works_at', techCorp);
kg.addRelation(bob, 'works_at', designStudio);
kg.addRelation(carol, 'works_at', techCorp);
kg.addRelation(dave, 'works_at', techCorp);

kg.addRelation(alice, 'knows', bob);
kg.addRelation(alice, 'knows', carol);
kg.addRelation(bob, 'knows', carol);
kg.addRelation(carol, 'knows', dave);

kg.addRelation(alice, 'interested_in', ai);
kg.addRelation(carol, 'interested_in', ml);
kg.addRelation(dave, 'interested_in', ai);

console.log(`Created ${kg.entities.size} entities and ${kg.relations.length} relations\n`);

// ===========================================
// SEMANTIC QUERIES
// ===========================================

console.log('==================================');
console.log('Semantic Entity Search:');
console.log('==================================\n');

// Free-text queries are matched against entity-description embeddings.
const queries = [
  'engineers in technology',
  'people interested in AI',
  'companies in design industry'
];

for (const q of queries) {
  console.log(`Query: "${q}"`);
  const results = kg.findEntities(q, 2);
  for (const { entity, similarity } of results) {
    console.log(`  [${similarity.toFixed(3)}] ${entity.name} (${entity.type})`);
  }
  console.log();
}

// ===========================================
// PATH FINDING
// ===========================================

console.log('==================================');
console.log('Path Finding:');
console.log('==================================\n');

// Expected: Alice -> Carol -> Dave via directed 'knows' edges added above.
console.log('Path from Alice to Dave:');
const path = kg.findPath(alice, dave);
if (path) {
  const pathNames = path.path.map(id => kg.entities.get(id).name);
  console.log(`  Path: ${pathNames.join(' → ')}`);
  console.log(`  Via: ${path.relations.map(r => r.relation).join(' → ')}`);
} else {
  console.log('  No path found');
}

// ===========================================
// MULTI-HOP QUERIES
// ===========================================

console.log('\n==================================');
console.log('Multi-Hop Query:');
console.log('==================================\n');

// Two 'knows' hops from Alice; the start entity is filtered out when printing.
console.log('Who are friends-of-friends of Alice?');
const fof = kg.multiHopQuery(alice, [
  { relation: 'knows' },
  { relation: 'knows' }
]);
for (const entity of fof) {
  if (entity.id !== alice) {
    console.log(`  ${entity.name}`);
  }
}

// ===========================================
// KEY TAKEAWAYS
// ===========================================

console.log('\n==================================');
console.log('KEY TAKEAWAYS:');
console.log('1. Entities and relations are embedded with primes');
console.log('2. Semantic search finds similar entities by meaning');
console.log('3. Path finding discovers connections between entities');
console.log('4. Multi-hop queries traverse relationship chains');
console.log('5. Combine with reasoning for inference over the graph');
console.log('6. Scale to large graphs with indexing strategies');
|
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @example LLM Integration
|
|
3
|
+
* @description Combine TinyAleph with LLM APIs for enhanced AI
|
|
4
|
+
*
|
|
5
|
+
* This example shows how to integrate TinyAleph with Large Language Models:
|
|
6
|
+
* - Pre-process inputs with prime embeddings
|
|
7
|
+
* - Post-process LLM outputs for verification
|
|
8
|
+
* - Use semantic memory for context management
|
|
9
|
+
* - Hybrid reasoning with symbolic + neural
|
|
10
|
+
*
|
|
11
|
+
* NOTE: This example simulates LLM responses. In production, replace
|
|
12
|
+
* the simulateLLM function with actual API calls (OpenAI, Anthropic, etc.)
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
const { SemanticBackend, Hypercomplex } = require('../../modular');
|
|
16
|
+
|
|
17
|
+
// ===========================================
// SETUP
// ===========================================

// Shared semantic backend used for all embeddings in this example
// (dimension 16 matches the Hypercomplex size used below).
const backend = new SemanticBackend({ dimension: 16 });

// Simulated LLM for demonstration (replace with real API)
|
|
24
|
+
function simulateLLM(prompt) {
  // Stand-in for a real LLM API call: routes the prompt to one of several
  // canned answers by keyword matching. When the prompt carries retrieved
  // conversation context, the context-aware variant of the answer is used.
  const lowered = prompt.toLowerCase();

  // Detect injected context and isolate the actual question
  // (the text following "New question:", if present).
  const hasContext = lowered.includes('relevant context');
  const extracted = prompt.match(/New question:\s*(.+)/is);
  const question = extracted ? extracted[1].toLowerCase().trim() : lowered;

  const asks = (phrase) => question.includes(phrase);

  // Summarization - check first since it's most specific
  if (asks('summarize') || asks('key points')) {
    return hasContext
      ? 'To summarize our AI discussion: (1) Machine learning enables systems to learn from data automatically, (2) Deep learning extends this with multi-layered neural networks for complex pattern recognition, (3) Both are subsets of AI that are transforming industries through data-driven decision making.'
      : 'Key AI concepts: autonomous learning, pattern recognition, and data-driven decision making.';
  }

  // Deep learning comparison (with context awareness)
  if (asks('deep learning') || asks('compare')) {
    return hasContext
      ? 'Building on the previous discussion of machine learning, deep learning is a specialized subset that uses neural networks with many layers (hence "deep"). While classical ML algorithms like decision trees work well with smaller datasets, deep learning excels at processing unstructured data like images and text at scale.'
      : 'Deep learning uses multi-layered neural networks to progressively extract higher-level features from raw input, enabling automatic feature learning without manual engineering.';
  }

  // Machine learning explanation
  if (asks('machine learning') && asks('explain')) {
    return 'Machine learning is a subset of artificial intelligence that enables systems to learn and improve from experience without being explicitly programmed. It focuses on developing algorithms that can access data and use it to learn for themselves.';
  }

  // Neural networks
  if (asks('neural network')) {
    return 'Neural networks are computing systems inspired by biological neural networks. They consist of interconnected nodes (neurons) organized in layers that process information using connectionist approaches to computation.';
  }

  // Ethics
  if (asks('ethics') || asks('ethical')) {
    return 'AI ethics encompasses fairness, transparency, privacy, and accountability. Key concerns include algorithmic bias, job displacement, and ensuring AI systems align with human values.';
  }

  // Default
  return 'I can help explain AI concepts, compare technologies, or summarize information. What would you like to know?';
}
|
|
68
|
+
|
|
69
|
+
// ===========================================
|
|
70
|
+
// SEMANTIC CONTEXT MANAGER
|
|
71
|
+
// ===========================================
|
|
72
|
+
|
|
73
|
+
class SemanticContextManager {
  // Maintains a rolling window of query/response exchanges, each stored with
  // its embedding, so later queries can retrieve the most relevant history.
  constructor(backend, maxContext = 10) {
    this.backend = backend;
    this.maxContext = maxContext;
    this.history = [];
  }

  // Cosine similarity between two embeddings (objects with a numeric `.c` array).
  similarity(a, b) {
    let dotProduct = 0;
    let normA = 0;
    let normB = 0;
    const len = a.c.length;
    for (let idx = 0; idx < len; idx++) {
      dotProduct += a.c[idx] * b.c[idx];
      normA += a.c[idx] * a.c[idx];
      normB += b.c[idx] * b.c[idx];
    }
    return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
  }

  // Record one query/response exchange, embedding both sides.
  addExchange(query, response) {
    const entry = {
      query,
      response,
      queryEmbed: this.backend.textToOrderedState(query),
      responseEmbed: this.backend.textToOrderedState(response),
      timestamp: Date.now()
    };
    this.history.push(entry);

    // Drop the oldest entries once the window exceeds maxContext.
    if (this.history.length > this.maxContext) {
      this.history = this.history.slice(-this.maxContext);
    }
  }

  // Rank stored exchanges by similarity to the new query; return the top-K.
  getRelevantContext(query, topK = 3) {
    const queryEmbed = this.backend.textToOrderedState(query);

    return this.history
      .map((entry) => ({
        ...entry,
        relevance: this.similarity(queryEmbed, entry.queryEmbed)
      }))
      .sort((x, y) => y.relevance - x.relevance)
      .slice(0, topK);
  }

  // Render the most relevant history as a context preamble for the LLM,
  // or an empty string when there is no history yet.
  buildContextString(query) {
    const relevant = this.getRelevantContext(query);
    if (relevant.length === 0) return '';

    const parts = relevant.map(
      (entry) => `Previous Q: ${entry.query}\nPrevious A: ${entry.response}`
    );

    return 'Relevant context from conversation:\n' + parts.join('\n\n');
  }
}
|
|
135
|
+
|
|
136
|
+
// ===========================================
|
|
137
|
+
// RESPONSE VERIFIER
|
|
138
|
+
// ===========================================
|
|
139
|
+
|
|
140
|
+
class ResponseVerifier {
  // Scores LLM output against a store of trusted statements by comparing
  // embeddings; "verified" means the best alignment exceeds 0.7.
  constructor(backend) {
    this.backend = backend;
    this.knownFacts = new Map();   // statement -> { embedding, confidence }
  }

  // Cosine similarity between two embeddings (objects with a numeric `.c` array).
  similarity(a, b) {
    let dotProduct = 0;
    let normA = 0;
    let normB = 0;
    for (let idx = 0; idx < a.c.length; idx++) {
      dotProduct += a.c[idx] * b.c[idx];
      normA += a.c[idx] * a.c[idx];
      normB += b.c[idx] * b.c[idx];
    }
    return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
  }

  // Register a trusted statement (with optional confidence) for verification.
  addFact(statement, confidence = 1.0) {
    this.knownFacts.set(statement, {
      embedding: this.backend.textToOrderedState(statement),
      confidence
    });
  }

  // Score a response against every known fact; report the best alignment,
  // the closest fact, and a boolean verdict.
  verify(response) {
    const responseEmbed = this.backend.textToOrderedState(response);

    let bestScore = 0;
    let bestFact = null;

    for (const [statement, fact] of this.knownFacts) {
      const score = this.similarity(responseEmbed, fact.embedding);
      if (score > bestScore) {
        bestScore = score;
        bestFact = { statement, confidence: fact.confidence };
      }
    }

    return {
      alignment: bestScore,
      relatedFact: bestFact,
      verified: bestScore > 0.7
    };
  }
}
|
|
187
|
+
|
|
188
|
+
// ===========================================
|
|
189
|
+
// LLM WRAPPER WITH TINYALEPH
|
|
190
|
+
// ===========================================
|
|
191
|
+
|
|
192
|
+
class EnhancedLLM {
  // Wraps a raw LLM callable with semantic context retrieval (pre-processing)
  // and fact-alignment verification (post-processing).
  constructor(backend, llmFunction) {
    this.backend = backend;
    this.llm = llmFunction;
    this.context = new SemanticContextManager(backend);
    this.verifier = new ResponseVerifier(backend);
  }

  // Run one query through the full pipeline:
  // context retrieval -> prompt assembly -> LLM call -> verification -> memory.
  query(userQuery) {
    // Pull semantically relevant history; prepend it only when non-empty.
    const contextStr = this.context.buildContextString(userQuery);
    const enhancedPrompt =
      contextStr.length > 0
        ? `${contextStr}\n\nNew question: ${userQuery}`
        : userQuery;

    // Ask the underlying model, then score its answer against known facts.
    const response = this.llm(enhancedPrompt);
    const verification = this.verifier.verify(response);

    // Remember this exchange for future context retrieval.
    this.context.addExchange(userQuery, response);

    return {
      response,
      verification,
      contextUsed: contextStr.length > 0
    };
  }

  // Seed the verifier with a list of trusted statements.
  addKnowledge(facts) {
    for (const fact of facts) {
      this.verifier.addFact(fact);
    }
  }
}
|
|
233
|
+
|
|
234
|
+
// ===========================================
|
|
235
|
+
// EXAMPLE USAGE
|
|
236
|
+
// ===========================================
|
|
237
|
+
|
|
238
|
+
console.log('TinyAleph LLM Integration Example');
console.log('==================================\n');

// Wire the (simulated) LLM into the semantic pipeline.
const enhancedLLM = new EnhancedLLM(backend, simulateLLM);

// Add some known facts for verification
enhancedLLM.addKnowledge([
  'Machine learning is a type of artificial intelligence',
  'Deep learning uses neural networks',
  'AI systems can learn from data',
  'Ethics in AI is an important consideration'
]);

console.log('Added knowledge base for verification.\n');

// ===========================================
// CONVERSATION WITH CONTEXT
// ===========================================

console.log('==================================');
console.log('Conversation with Semantic Context:');
console.log('==================================\n');

// Each query is answered with semantic context retrieved from earlier
// exchanges, then checked against the knowledge base.
const queries = [
  'Please explain what machine learning is',
  'How does deep learning compare to traditional machine learning?',
  'Can you summarize the key points from our AI discussion?'
];

for (const query of queries) {
  console.log(`User: ${query}`);
  const result = enhancedLLM.query(query);
  console.log(`Assistant: ${result.response}`);
  console.log(`  [Context used: ${result.contextUsed ? 'YES' : 'NO'}]`);
  console.log(`  [Verification: ${result.verification.verified ? '✓ ALIGNED' : '? UNCERTAIN'}]`);
  if (result.verification.relatedFact) {
    console.log(`  [Related to: "${result.verification.relatedFact.statement.substring(0, 40)}..."]`);
  }
  console.log();
}

// ===========================================
// SEMANTIC PROMPT ENGINEERING
// ===========================================

console.log('==================================');
console.log('Semantic Prompt Engineering:');
console.log('==================================\n');
|
|
286
|
+
|
|
287
|
+
// Generate semantically-grounded prompts: blend the topic and style
// embeddings and use the magnitude of the blend as a complexity hint
// for the suggested response length.
function generateSemanticPrompt(topic, style) {
  const topicEmbed = backend.textToOrderedState(topic);
  const styleEmbed = backend.textToOrderedState(style);

  // Component-wise average of the two embeddings.
  const combined = Hypercomplex.zero(16);
  for (let axis = 0; axis < 16; axis++) {
    combined.c[axis] = (topicEmbed.c[axis] + styleEmbed.c[axis]) / 2;
  }

  // Magnitude above 1 suggests a more detailed response is warranted.
  const complexity = combined.norm();

  return {
    topic,
    style,
    suggestedLength: complexity > 1 ? 'detailed' : 'concise',
    embedding: combined
  };
}
|
|
308
|
+
|
|
309
|
+
// Build prompt descriptors for three topic/style pairs and report the
// length suggestion derived from each blended embedding.
const prompts = [
  generateSemanticPrompt('quantum computing basics', 'beginner-friendly'),
  generateSemanticPrompt('neural network architecture', 'technical deep-dive'),
  generateSemanticPrompt('AI ethics', 'philosophical discussion')
];

for (const prompt of prompts) {
  console.log(`Topic: ${prompt.topic}`);
  console.log(`Style: ${prompt.style}`);
  console.log(`Suggested length: ${prompt.suggestedLength}`);
  console.log();
}

// ===========================================
// KEY TAKEAWAYS
// ===========================================

console.log('==================================');
console.log('KEY TAKEAWAYS:');
console.log('1. Use SemanticContextManager for relevant history retrieval');
console.log('2. ResponseVerifier checks LLM outputs against known facts');
console.log('3. Prime embeddings provide deterministic semantic grounding');
console.log('4. Combine with real LLM APIs (OpenAI, Anthropic, etc.)');
console.log('5. Semantic prompts guide LLM behavior consistently');
console.log('6. Build hybrid systems: symbolic reasoning + neural generation');
|