@aleph-ai/tinyaleph 1.5.7 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/core/alexander-module.js +1469 -0
- package/core/arithmetic-link-kernel.js +1338 -0
- package/core/index.js +95 -2
- package/examples/01-hello-world.js +69 -0
- package/examples/02-basic-hash.js +90 -0
- package/examples/02-observer-stack.js +385 -0
- package/examples/03-quantum-coin.js +136 -0
- package/examples/05-symbolic-resonance.js +146 -0
- package/examples/06-symbol-database.js +150 -0
- package/examples/07-semantic-inference.js +223 -0
- package/examples/08-compound-symbols.js +219 -0
- package/examples/README.md +170 -0
- package/examples/ai/01-embeddings.js +155 -0
- package/examples/ai/02-semantic-memory.js +243 -0
- package/examples/ai/03-reasoning.js +243 -0
- package/examples/ai/04-knowledge-graph.js +279 -0
- package/examples/ai/05-llm-integration.js +333 -0
- package/examples/ai/06-agent.js +294 -0
- package/examples/ai/07-hybrid-ai.js +223 -0
- package/examples/ai/08-entropy-reasoning.js +259 -0
- package/examples/ai/09-concept-learning.js +271 -0
- package/examples/ai/10-prompt-primes.js +312 -0
- package/examples/ai/11-rag.js +332 -0
- package/examples/ai/12-neuro-symbolic.js +321 -0
- package/examples/ai/README.md +80 -0
- package/examples/arithmetic-topology/01-legendre-symbol.js +78 -0
- package/examples/arithmetic-topology/02-redei-symbol.js +126 -0
- package/examples/arithmetic-topology/03-alk-kuramoto.js +138 -0
- package/examples/arithmetic-topology/04-alexander-module.js +117 -0
- package/examples/arithmetic-topology/05-signature-memory.js +118 -0
- package/examples/arithmetic-topology/README.md +291 -0
- package/examples/bioinformatics/01-dna-encoding.js +108 -0
- package/examples/bioinformatics/02-central-dogma.js +162 -0
- package/examples/bioinformatics/03-protein-folding.js +206 -0
- package/examples/bioinformatics/04-dna-computing.js +192 -0
- package/examples/bioinformatics/05-molecular-binding.js +209 -0
- package/examples/chat.js +105 -0
- package/examples/crt-homology/01-residue-encoding.js +87 -0
- package/examples/crt-homology/02-birkhoff-attention.js +100 -0
- package/examples/crt-homology/03-homology-loss.js +132 -0
- package/examples/crt-homology/04-crt-resoformer.js +132 -0
- package/examples/crt-homology/README.md +67 -0
- package/examples/crypto/01-password-hash.js +210 -0
- package/examples/crypto/02-key-derivation.js +210 -0
- package/examples/crypto/03-hmac.js +229 -0
- package/examples/crypto/04-file-integrity.js +263 -0
- package/examples/crypto/05-content-hash.js +263 -0
- package/examples/crypto/README.md +99 -0
- package/examples/demo-modular.js +223 -0
- package/examples/demo-two-layer.js +196 -0
- package/examples/discrete/01-integer-sine-table.js +120 -0
- package/examples/discrete/02-codebook-tunneling.js +118 -0
- package/examples/discrete/03-canonical-fusion.js +135 -0
- package/examples/discrete/04-tick-gate.js +139 -0
- package/examples/discrete/README.md +142 -0
- package/examples/formal-semantics/01-typed-terms.js +156 -0
- package/examples/formal-semantics/02-reduction.js +202 -0
- package/examples/formal-semantics/03-lambda-translation.js +206 -0
- package/examples/formal-semantics/04-enochian-language.js +257 -0
- package/examples/formal-semantics/README.md +98 -0
- package/examples/math/01-quaternions.js +237 -0
- package/examples/math/02-octonions.js +192 -0
- package/examples/math/03-prime-factorization.js +215 -0
- package/examples/math/04-vector-spaces.js +210 -0
- package/examples/math/05-gaussian-primes.js +234 -0
- package/examples/math/README.md +93 -0
- package/examples/physics/01-oscillator.js +177 -0
- package/examples/physics/02-lyapunov.js +201 -0
- package/examples/physics/03-collapse.js +183 -0
- package/examples/physics/04-kuramoto.js +212 -0
- package/examples/physics/05-entropy.js +226 -0
- package/examples/physics/05-sync-models.js +298 -0
- package/examples/physics/06-primeon-ladder.js +233 -0
- package/examples/physics/07-kuramoto-coupled-ladder.js +298 -0
- package/examples/physics/README.md +126 -0
- package/examples/resonance/01-prime-hilbert-space.js +140 -0
- package/examples/resonance/02-prime-resonance-network.js +221 -0
- package/examples/resonance/03-resoformer.js +349 -0
- package/examples/resonance/04-resoformer-training.js +329 -0
- package/examples/resonance/05-language-model.js +484 -0
- package/examples/resonance/README.md +238 -0
- package/examples/run-examples.js +417 -0
- package/examples/scientific/01-single-qubit.js +185 -0
- package/examples/scientific/02-two-qubit.js +209 -0
- package/examples/scientific/03-quantum-circuits.js +270 -0
- package/examples/scientific/04-measurement.js +229 -0
- package/examples/scientific/05-algorithms.js +245 -0
- package/examples/scientific/06-random.js +225 -0
- package/examples/scientific/07-wavefunction.js +192 -0
- package/examples/scientific/README.md +118 -0
- package/examples/semantic/01-vocabulary.js +186 -0
- package/examples/semantic/02-similarity.js +263 -0
- package/examples/semantic/03-word-algebra.js +295 -0
- package/examples/semantic/04-clustering.js +348 -0
- package/examples/semantic/05-classification.js +386 -0
- package/examples/semantic/06-dna-encoding.js +228 -0
- package/examples/semantic/07-search.js +304 -0
- package/examples/semantic/08-qa-system.js +278 -0
- package/examples/semantic/README.md +116 -0
- package/examples/topology/01-108-invariant.js +81 -0
- package/examples/topology/02-trefoil-constants.js +112 -0
- package/examples/topology/03-gauge-symmetry.js +112 -0
- package/examples/topology/04-free-energy-dynamics.js +124 -0
- package/examples/topology/README.md +129 -0
- package/index.js +32 -0
- package/modular.js +63 -2
- package/package.json +8 -3
- package/physics/alk-kuramoto.js +817 -0
- package/physics/index.js +23 -2
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @example Entropy-Based Reasoning
|
|
3
|
+
* @description Use entropy minimization for inference
|
|
4
|
+
*
|
|
5
|
+
* TinyAleph's physics module provides entropy calculations that can
|
|
6
|
+
* guide reasoning. Low entropy indicates certainty; high entropy
|
|
7
|
+
* indicates confusion. This example shows:
|
|
8
|
+
* - Measuring reasoning uncertainty
|
|
9
|
+
* - Guiding inference toward clarity
|
|
10
|
+
* - Detecting when more information is needed
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
const { SemanticBackend, Hypercomplex } = require('../../modular');
|
|
14
|
+
|
|
15
|
+
// ===========================================
|
|
16
|
+
// SETUP
|
|
17
|
+
// ===========================================
|
|
18
|
+
|
|
19
|
+
const backend = new SemanticBackend({ dimension: 16 });
|
|
20
|
+
|
|
21
|
+
// ===========================================
|
|
22
|
+
// ENTROPY CALCULATOR
|
|
23
|
+
// ===========================================
|
|
24
|
+
|
|
25
|
+
/**
 * Measures the uncertainty of a hypercomplex state via Shannon entropy.
 * A state's components are treated (after abs + normalization) as a
 * probability distribution; low entropy means the state is concentrated
 * (certain), high entropy means it is spread out (confused).
 */
class EntropyReasoner {
  constructor(backend) {
    this.backend = backend;
  }

  // Shannon entropy (in bits) of the |c_i| distribution of a state.
  // Returns 0 for the all-zero state, which carries no information.
  calculateEntropy(state) {
    const components = state.c.map(c => Math.abs(c));
    const sum = components.reduce((s, c) => s + c, 0);
    if (sum === 0) return 0;

    const probs = components.map(c => c / sum);
    let entropy = 0;
    for (const p of probs) {
      if (p > 0) {
        entropy -= p * Math.log2(p);
      }
    }
    return entropy;
  }

  // Maximum possible entropy for this dimension: log2(dim) bits
  // (achieved by the uniform distribution).
  maxEntropy(dim = 16) {
    return Math.log2(dim);
  }

  // Normalized entropy in [0, 1] (0 = certain, 1 = maximum confusion).
  // FIX: guard against dim <= 1, where maxEntropy is 0 and the original
  // code divided by zero and returned NaN.
  normalizedEntropy(state) {
    const max = this.maxEntropy(state.c.length);
    if (max <= 0) return 0;
    return this.calculateEntropy(state) / max;
  }

  // Map a normalized entropy value to a qualitative reading plus a
  // suggested next action. Bands: <0.3, <0.6, <0.8, else.
  interpret(entropy) {
    if (entropy < 0.3) return { level: 'LOW', meaning: 'High certainty', action: 'Proceed with confidence' };
    if (entropy < 0.6) return { level: 'MEDIUM', meaning: 'Some uncertainty', action: 'Consider alternatives' };
    if (entropy < 0.8) return { level: 'HIGH', meaning: 'Significant confusion', action: 'Gather more information' };
    return { level: 'VERY_HIGH', meaning: 'Maximum uncertainty', action: 'Cannot make reliable inference' };
  }
}
|
|
64
|
+
|
|
65
|
+
// ===========================================
|
|
66
|
+
// ENTROPY-GUIDED INFERENCE
|
|
67
|
+
// ===========================================
|
|
68
|
+
|
|
69
|
+
/**
 * Maintains a set of competing hypotheses, updates their probabilities
 * with a Bayesian-like rule as textual evidence arrives, and uses the
 * entropy of the belief distribution to recommend whether to proceed
 * or gather more information.
 */
class EntropyGuidedInference {
  constructor(backend) {
    this.backend = backend;
    this.reasoner = new EntropyReasoner(backend);
    this.hypotheses = []; // { description, embedding, probability }
    this.evidence = [];   // { observation, embedding }
  }

  // Cosine similarity between two states (component arrays in `.c`).
  // FIX: return 0 when either vector has zero magnitude; the original
  // divided by zero, and the resulting NaN poisoned every subsequent
  // probability update.
  similarity(a, b) {
    let dot = 0, magA = 0, magB = 0;
    for (let i = 0; i < a.c.length; i++) {
      dot += a.c[i] * b.c[i];
      magA += a.c[i] * a.c[i];
      magB += b.c[i] * b.c[i];
    }
    if (magA === 0 || magB === 0) return 0;
    return dot / (Math.sqrt(magA) * Math.sqrt(magB));
  }

  // Register a hypothesis with an optional prior probability.
  addHypothesis(description, prior = 0.5) {
    this.hypotheses.push({
      description,
      embedding: this.backend.textToOrderedState(description),
      probability: prior
    });
  }

  // Record an observation and immediately re-weigh all hypotheses.
  addEvidence(observation) {
    const embedding = this.backend.textToOrderedState(observation);
    this.evidence.push({ observation, embedding });

    // Update hypothesis probabilities based on evidence
    this.updateProbabilities(embedding);
  }

  // Bayesian-like update. The likelihood of each hypothesis is its
  // embedding similarity to the evidence, floored at 0.1 so that no
  // hypothesis is ever driven to exactly zero.
  updateProbabilities(evidenceEmbed) {
    const likelihoods = this.hypotheses.map(h =>
      Math.max(0.1, this.similarity(h.embedding, evidenceEmbed))
    );

    const totalLikelihood = likelihoods.reduce((s, l) => s + l, 0);

    for (let i = 0; i < this.hypotheses.length; i++) {
      const likelihood = likelihoods[i] / totalLikelihood;
      this.hypotheses[i].probability =
        (this.hypotheses[i].probability * likelihood) /
        (this.hypotheses[i].probability * likelihood +
         (1 - this.hypotheses[i].probability) * (1 - likelihood));
    }

    // Normalize so the probabilities sum to 1 again.
    const total = this.hypotheses.reduce((s, h) => s + h.probability, 0);
    for (const h of this.hypotheses) {
      h.probability /= total;
    }
  }

  // Pack the belief distribution into a hypercomplex state, one
  // component per hypothesis (capped at the 16 available slots).
  getBeliefState() {
    const state = Hypercomplex.zero(16);
    for (let i = 0; i < this.hypotheses.length && i < 16; i++) {
      state.c[i] = this.hypotheses[i].probability;
    }
    return state;
  }

  // Normalized entropy of the current belief distribution.
  getBeliefEntropy() {
    const state = this.getBeliefState();
    return this.reasoner.normalizedEntropy(state);
  }

  // Summarize current beliefs: entropy, its interpretation, the best
  // hypothesis, and the probability margin over the runner-up.
  recommend() {
    const entropy = this.getBeliefEntropy();
    const interpretation = this.reasoner.interpret(entropy);

    // Find best hypothesis
    const sorted = [...this.hypotheses].sort((a, b) => b.probability - a.probability);
    const best = sorted[0];
    const secondBest = sorted[1];

    return {
      entropy,
      interpretation,
      bestHypothesis: best,
      confidence: best ? (best.probability - (secondBest?.probability || 0)) : 0
    };
  }
}
|
|
162
|
+
|
|
163
|
+
// ===========================================
|
|
164
|
+
// EXAMPLE: DIAGNOSTIC REASONING
|
|
165
|
+
// ===========================================
|
|
166
|
+
|
|
167
|
+
console.log('TinyAleph Entropy-Based Reasoning Example');
console.log('==========================================\n');

const inference = new EntropyGuidedInference(backend);

// Scenario: Diagnosing a car problem
console.log('Scenario: Car won\'t start. What\'s wrong?\n');

// Seed four equally likely explanations for the failure.
const carHypotheses = [
  'Dead battery - no electrical power',
  'Empty fuel tank - no gas',
  'Failed starter motor - mechanical issue',
  'Electrical system failure - wiring problem'
];
carHypotheses.forEach(description => inference.addHypothesis(description, 0.25));

// Print the current probability table.
const printHypotheses = () => {
  inference.hypotheses.forEach(h => {
    console.log(`  [${(h.probability * 100).toFixed(0)}%] ${h.description}`);
  });
};

console.log('Initial hypotheses (equal probability):');
printHypotheses();

let rec = inference.recommend();
console.log(`\nInitial entropy: ${rec.entropy.toFixed(3)} (${rec.interpretation.level})`);
console.log(`Action: ${rec.interpretation.action}\n`);

// First observation points toward the electrical system.
console.log('─'.repeat(50));
console.log('Adding evidence: "Dashboard lights don\'t turn on"\n');
inference.addEvidence('Dashboard lights and electronics are not working');

console.log('Updated probabilities:');
printHypotheses();

rec = inference.recommend();
console.log(`\nEntropy: ${rec.entropy.toFixed(3)} (${rec.interpretation.level})`);
console.log(`Action: ${rec.interpretation.action}`);

// A second observation narrows things down further.
console.log('\n' + '─'.repeat(50));
console.log('Adding evidence: "Headlights also don\'t work"\n');
inference.addEvidence('Headlights and all lights are dead');

console.log('Updated probabilities:');
printHypotheses();

rec = inference.recommend();
console.log(`\nEntropy: ${rec.entropy.toFixed(3)} (${rec.interpretation.level})`);
console.log(`Best hypothesis: "${rec.bestHypothesis.description}"`);
console.log(`Confidence margin: ${(rec.confidence * 100).toFixed(0)}%`);
console.log(`Action: ${rec.interpretation.action}`);

// ===========================================
// ENTROPY COMPARISON
// ===========================================

console.log('\n' + '═'.repeat(50));
console.log('Entropy Analysis of Different States:');
console.log('═'.repeat(50) + '\n');

const reasoner = new EntropyReasoner(backend);

// Different semantic states, from confident to contradictory.
const states = [
  { name: 'Certain answer', text: 'The answer is definitely 42' },
  { name: 'Uncertain', text: 'The answer might be 42 or possibly 43 or maybe something else' },
  { name: 'Question', text: 'What is the meaning of life?' },
  { name: 'Contradiction', text: 'It is both true and false simultaneously' }
];

states.forEach(({ name, text }) => {
  const embedding = backend.textToOrderedState(text);
  const entropy = reasoner.normalizedEntropy(embedding);
  const interp = reasoner.interpret(entropy);
  console.log(`${name}:`);
  console.log(`  Text: "${text}"`);
  console.log(`  Entropy: ${entropy.toFixed(3)} (${interp.level})`);
  console.log();
});

// ===========================================
// KEY TAKEAWAYS
// ===========================================

console.log('═'.repeat(50));
console.log('KEY TAKEAWAYS:');
console.log('1. Entropy measures uncertainty in beliefs/states');
console.log('2. Low entropy = high certainty = proceed confidently');
console.log('3. High entropy = confusion = gather more evidence');
console.log('4. Evidence updates shift probability distribution');
console.log('5. Use entropy to decide when to stop reasoning');
console.log('6. Combine with Bayesian updates for powerful inference');
|
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @example Concept Learning
|
|
3
|
+
* @description Learn new concepts from examples
|
|
4
|
+
*
|
|
5
|
+
* This example shows how to use TinyAleph for few-shot concept learning:
|
|
6
|
+
* - Define concepts from positive/negative examples
|
|
7
|
+
* - Generalize to new instances
|
|
8
|
+
* - Build concept hierarchies
|
|
9
|
+
* - Handle edge cases
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
const { SemanticBackend, Hypercomplex } = require('../../modular');
|
|
13
|
+
|
|
14
|
+
// ===========================================
|
|
15
|
+
// SETUP
|
|
16
|
+
// ===========================================
|
|
17
|
+
|
|
18
|
+
const backend = new SemanticBackend({ dimension: 16 });
|
|
19
|
+
|
|
20
|
+
// ===========================================
|
|
21
|
+
// CONCEPT LEARNER
|
|
22
|
+
// ===========================================
|
|
23
|
+
|
|
24
|
+
/**
 * Few-shot concept learner. Each concept is stored as the centroid of
 * its positive-example embeddings, a variance (how tightly the positives
 * cluster), and a similarity threshold separating members from
 * non-members (placed halfway between the closest negative and the
 * farthest positive when negatives are given).
 */
class ConceptLearner {
  constructor(backend) {
    this.backend = backend;
    this.concepts = new Map(); // name -> { centroid, variance, threshold, examples }
  }

  // Cosine similarity between two states (component arrays in `.c`).
  // FIX: return 0 when either vector has zero magnitude; the original
  // divided by zero, and the resulting NaN corrupted thresholds and
  // classifications downstream.
  similarity(a, b) {
    let dot = 0, magA = 0, magB = 0;
    for (let i = 0; i < a.c.length; i++) {
      dot += a.c[i] * b.c[i];
      magA += a.c[i] * a.c[i];
      magB += b.c[i] * b.c[i];
    }
    if (magA === 0 || magB === 0) return 0;
    return dot / (Math.sqrt(magA) * Math.sqrt(magB));
  }

  // Learn (or re-learn) a concept from positive/negative example texts.
  // Returns { variance, threshold } for inspection.
  learnConcept(name, positiveExamples, negativeExamples = []) {
    // Encode all examples
    const posEmbeddings = positiveExamples.map(ex => this.backend.textToOrderedState(ex));
    const negEmbeddings = negativeExamples.map(ex => this.backend.textToOrderedState(ex));

    // Compute centroid of positive examples
    const centroid = Hypercomplex.zero(16);
    for (const emb of posEmbeddings) {
      for (let i = 0; i < 16; i++) {
        centroid.c[i] += emb.c[i] / posEmbeddings.length;
      }
    }

    // Compute variance (how spread out the examples are).
    // FIX: with zero positive examples the original computed
    // sqrt(0 / 0) = NaN; report 0 instead.
    let variance = 0;
    for (const emb of posEmbeddings) {
      const dist = 1 - this.similarity(emb, centroid);
      variance += dist * dist;
    }
    variance = posEmbeddings.length > 0 ? Math.sqrt(variance / posEmbeddings.length) : 0;

    // Compute boundary (based on negative examples).
    // FIX: also require positives, since Math.min of an empty list is
    // Infinity and would have produced an Infinity threshold.
    let threshold = 0.5;
    if (negEmbeddings.length > 0 && posEmbeddings.length > 0) {
      const negSims = negEmbeddings.map(emb => this.similarity(emb, centroid));
      const maxNegSim = Math.max(...negSims);
      const minPosSim = Math.min(...posEmbeddings.map(emb => this.similarity(emb, centroid)));
      threshold = (maxNegSim + minPosSim) / 2;
    }

    this.concepts.set(name, {
      name,
      centroid,
      variance,
      threshold,
      positiveExamples,
      negativeExamples
    });

    return { variance, threshold };
  }

  // Score a new instance against every learned concept; results are
  // sorted by similarity, best first.
  classify(instance) {
    const embedding = this.backend.textToOrderedState(instance);
    const results = [];

    for (const [name, concept] of this.concepts) {
      const sim = this.similarity(embedding, concept.centroid);
      const isMember = sim > concept.threshold;
      // Confidence is the distance from the threshold, rescaled to the
      // remaining range on the member (or non-member) side.
      const confidence = isMember
        ? (sim - concept.threshold) / (1 - concept.threshold)
        : (concept.threshold - sim) / concept.threshold;

      results.push({
        concept: name,
        similarity: sim,
        threshold: concept.threshold,
        isMember,
        confidence: Math.abs(confidence)
      });
    }

    results.sort((a, b) => b.similarity - a.similarity);
    return results;
  }

  // The highest-similarity concept the instance is a member of, or null.
  bestMatch(instance) {
    const results = this.classify(instance);
    const members = results.filter(r => r.isMember);
    return members.length > 0 ? members[0] : null;
  }

  // The positive example closest to the concept centroid — the most
  // "typical" instance — with its typicality (similarity) score.
  getPrototype(conceptName) {
    const concept = this.concepts.get(conceptName);
    if (!concept) return null;

    let bestExample = null;
    let bestSim = -1;

    for (const ex of concept.positiveExamples) {
      const sim = this.similarity(this.backend.textToOrderedState(ex), concept.centroid);
      if (sim > bestSim) {
        bestSim = sim;
        bestExample = ex;
      }
    }

    return { prototype: bestExample, typicality: bestSim };
  }
}
|
|
137
|
+
|
|
138
|
+
// ===========================================
|
|
139
|
+
// EXAMPLE: LEARNING ANIMALS
|
|
140
|
+
// ===========================================
|
|
141
|
+
|
|
142
|
+
console.log('TinyAleph Concept Learning Example');
console.log('===================================\n');

const learner = new ConceptLearner(backend);

// Print a section banner in the example's house style.
const section = (title) => {
  console.log('===================================');
  console.log(title);
  console.log('===================================\n');
};

// Teach three concepts from positive and negative examples.
console.log('Learning concept: BIRD');
const birdStats = learner.learnConcept('bird', [
  'a small feathered creature that flies',
  'an animal with wings and a beak',
  'a creature that lays eggs and has feathers',
  'a flying animal that sings in trees'
], [
  'a furry animal with four legs',
  'a fish that swims in water',
  'an insect with six legs'
]);
console.log(`  Variance: ${birdStats.variance.toFixed(3)}, Threshold: ${birdStats.threshold.toFixed(3)}`);

console.log('Learning concept: MAMMAL');
const mammalStats = learner.learnConcept('mammal', [
  'a warm-blooded animal with fur',
  'a creature that gives live birth and nurses young',
  'an animal with hair that feeds milk to babies'
], [
  'a cold-blooded reptile',
  'an animal that lays eggs',
  'a fish with scales'
]);
console.log(`  Variance: ${mammalStats.variance.toFixed(3)}, Threshold: ${mammalStats.threshold.toFixed(3)}`);

console.log('Learning concept: VEHICLE');
const vehicleStats = learner.learnConcept('vehicle', [
  'a machine used for transportation',
  'a car that drives on roads',
  'a truck that carries cargo',
  'an automobile with wheels and engine'
], [
  'a bicycle powered by pedaling',
  'a horse that carries riders',
  'a boat that floats on water'
]);
console.log(`  Variance: ${vehicleStats.variance.toFixed(3)}, Threshold: ${vehicleStats.threshold.toFixed(3)}\n`);

section('Classifying new instances:');

const testInstances = [
  'a sparrow with colorful feathers',
  'a dog with soft fur',
  'a sedan automobile',
  'a penguin that cannot fly', // Edge case!
  'a bat that flies at night', // Tricky!
  'a robot car that drives itself'
];

testInstances.forEach((instance) => {
  const results = learner.classify(instance);
  const [best] = results;

  console.log(`"${instance}"`);
  console.log(`  Best match: ${best.concept} (sim: ${best.similarity.toFixed(3)}, member: ${best.isMember ? 'YES' : 'NO'})`);

  // Show how the instance scores against every learned concept.
  results.forEach((r) => {
    const status = r.isMember ? '✓' : '✗';
    console.log(`  ${status} ${r.concept}: ${(r.similarity * 100).toFixed(1)}%`);
  });
  console.log();
});

section('Concept Prototypes:');

for (const name of learner.concepts.keys()) {
  const proto = learner.getPrototype(name);
  console.log(`${name.toUpperCase()}:`);
  console.log(`  Most typical: "${proto.prototype}"`);
  console.log(`  Typicality: ${(proto.typicality * 100).toFixed(1)}%\n`);
}

section('Incremental Learning:');

// Re-learn "bird" with extra positives (including the tricky penguin)
// and one more negative (the bat).
console.log('Adding more bird examples...');
const birdConcept = learner.concepts.get('bird');
learner.learnConcept('bird', [
  ...birdConcept.positiveExamples,
  'an eagle soaring in the sky',
  'a parrot that can mimic speech',
  'a penguin that swims but cannot fly'
], [
  ...birdConcept.negativeExamples,
  'a bat that uses echolocation'
]);

// Re-test the penguin edge case against the refined concept.
const penguinResult = learner.classify('a penguin that cannot fly');
console.log(`Re-classifying "penguin that cannot fly":`);
console.log(`  Bird similarity: ${(penguinResult.find(r => r.concept === 'bird').similarity * 100).toFixed(1)}%`);

// ===========================================
// KEY TAKEAWAYS
// ===========================================

console.log('\n===================================');
console.log('KEY TAKEAWAYS:');
console.log('1. Concepts are learned from positive/negative examples');
console.log('2. Centroids represent the "prototype" of a concept');
console.log('3. Thresholds separate members from non-members');
console.log('4. Variance indicates how tightly defined a concept is');
console.log('5. Edge cases (penguin, bat) reveal concept boundaries');
console.log('6. Incremental learning refines concepts over time');
|