@sparkleideas/ruv-swarm 1.0.18-patch.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1565 -0
- package/bin/ruv-swarm-clean.js +1872 -0
- package/bin/ruv-swarm-memory.js +119 -0
- package/bin/ruv-swarm-secure-heartbeat.js +1549 -0
- package/bin/ruv-swarm-secure.js +1689 -0
- package/package.json +221 -0
- package/src/agent.ts +342 -0
- package/src/benchmark.js +267 -0
- package/src/claude-flow-enhanced.js +839 -0
- package/src/claude-integration/advanced-commands.js +561 -0
- package/src/claude-integration/core.js +112 -0
- package/src/claude-integration/docs.js +1548 -0
- package/src/claude-integration/env-template.js +39 -0
- package/src/claude-integration/index.js +209 -0
- package/src/claude-integration/remote.js +408 -0
- package/src/cli-diagnostics.js +364 -0
- package/src/cognitive-pattern-evolution.js +1317 -0
- package/src/daa-cognition.js +977 -0
- package/src/daa-service.d.ts +298 -0
- package/src/daa-service.js +1116 -0
- package/src/diagnostics.js +533 -0
- package/src/errors.js +528 -0
- package/src/github-coordinator/README.md +193 -0
- package/src/github-coordinator/claude-hooks.js +162 -0
- package/src/github-coordinator/gh-cli-coordinator.js +260 -0
- package/src/hooks/cli.js +82 -0
- package/src/hooks/index.js +1900 -0
- package/src/index-enhanced.d.ts +371 -0
- package/src/index-enhanced.js +734 -0
- package/src/index.d.ts +287 -0
- package/src/index.js +405 -0
- package/src/index.ts +457 -0
- package/src/logger.js +182 -0
- package/src/logging-config.js +179 -0
- package/src/mcp-daa-tools.js +735 -0
- package/src/mcp-tools-benchmarks.js +328 -0
- package/src/mcp-tools-enhanced.js +2863 -0
- package/src/memory-config.js +42 -0
- package/src/meta-learning-framework.js +1359 -0
- package/src/neural-agent.js +830 -0
- package/src/neural-coordination-protocol.js +1363 -0
- package/src/neural-models/README.md +118 -0
- package/src/neural-models/autoencoder.js +543 -0
- package/src/neural-models/base.js +269 -0
- package/src/neural-models/cnn.js +497 -0
- package/src/neural-models/gnn.js +447 -0
- package/src/neural-models/gru.js +536 -0
- package/src/neural-models/index.js +273 -0
- package/src/neural-models/lstm.js +551 -0
- package/src/neural-models/neural-presets-complete.js +1306 -0
- package/src/neural-models/presets/graph.js +392 -0
- package/src/neural-models/presets/index.js +279 -0
- package/src/neural-models/presets/nlp.js +328 -0
- package/src/neural-models/presets/timeseries.js +368 -0
- package/src/neural-models/presets/vision.js +387 -0
- package/src/neural-models/resnet.js +534 -0
- package/src/neural-models/transformer.js +515 -0
- package/src/neural-models/vae.js +489 -0
- package/src/neural-network-manager.js +1938 -0
- package/src/neural-network.ts +296 -0
- package/src/neural.js +574 -0
- package/src/performance-benchmarks.js +898 -0
- package/src/performance.js +458 -0
- package/src/persistence-pooled.js +695 -0
- package/src/persistence.js +480 -0
- package/src/schemas.js +864 -0
- package/src/security.js +218 -0
- package/src/singleton-container.js +183 -0
- package/src/sqlite-pool.js +587 -0
- package/src/sqlite-worker.js +141 -0
- package/src/types.ts +164 -0
- package/src/utils.ts +286 -0
- package/src/wasm-loader.js +601 -0
- package/src/wasm-loader2.js +404 -0
- package/src/wasm-memory-optimizer.js +783 -0
- package/src/wasm-types.d.ts +63 -0
- package/wasm/README.md +347 -0
- package/wasm/neuro-divergent.wasm +0 -0
- package/wasm/package.json +18 -0
- package/wasm/ruv-fann.wasm +0 -0
- package/wasm/ruv_swarm_simd.wasm +0 -0
- package/wasm/ruv_swarm_wasm.d.ts +391 -0
- package/wasm/ruv_swarm_wasm.js +2164 -0
- package/wasm/ruv_swarm_wasm_bg.wasm +0 -0
- package/wasm/ruv_swarm_wasm_bg.wasm.d.ts +123 -0
- package/wasm/wasm-bindings-loader.mjs +435 -0
- package/wasm/wasm-updates.md +684 -0
|
@@ -0,0 +1,489 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Variational Autoencoder (VAE) Model
|
|
3
|
+
* Implements generative modeling with latent space learning
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { NeuralModel } from './base.js';
|
|
7
|
+
|
|
8
|
+
/**
 * Variational Autoencoder (VAE).
 *
 * Fully-connected encoder/decoder with a Gaussian latent space trained via
 * the reparameterization trick. Total loss = reconstruction loss (binary
 * cross-entropy when the output activation is sigmoid, otherwise 0.5 * MSE)
 * plus `betaKL` times the closed-form KL divergence KL(q(z|x) || N(0, I)).
 *
 * Tensors are plain Float32Arrays carrying an ad-hoc `.shape` property of
 * the form [batchSize, dim]. Helpers such as `relu`, `sigmoid`, `tanh`,
 * `dropout`, `shuffle` and `backward` are inherited from NeuralModel
 * (defined in './base.js' — not visible here; their exact semantics are
 * assumed, TODO confirm against base.js).
 */
class VAEModel extends NeuralModel {
  /**
   * @param {object} [config] - Configuration. Any key supplied here overrides
   *   the defaults below (the trailing spread wins, so falsy caller values
   *   such as `dropoutRate: 0` are honored).
   * @param {number}   [config.inputSize=784]          - Flattened input size (default fits MNIST).
   * @param {number[]} [config.encoderLayers=[512,256]] - Encoder hidden layer sizes.
   * @param {number}   [config.latentDimensions=20]     - Latent space dimensionality.
   * @param {number[]} [config.decoderLayers=[256,512]] - Decoder hidden layer sizes.
   * @param {string}   [config.activation='relu']       - Hidden activation ('relu'|'leaky_relu'|'tanh'|'elu').
   * @param {string}   [config.outputActivation='sigmoid'] - Output activation ('sigmoid'|'tanh'|'linear').
   * @param {number}   [config.dropoutRate=0.1]         - Dropout probability used only while training.
   * @param {number}   [config.betaKL=1.0]              - Weight on the KL divergence term (beta-VAE).
   */
  constructor(config = {}) {
    super('vae');

    // VAE configuration: explicit defaults first, then `...config` so every
    // caller-supplied key takes precedence.
    this.config = {
      inputSize: config.inputSize || 784, // Default for flattened MNIST
      encoderLayers: config.encoderLayers || [512, 256],
      latentDimensions: config.latentDimensions || 20,
      decoderLayers: config.decoderLayers || [256, 512],
      activation: config.activation || 'relu',
      outputActivation: config.outputActivation || 'sigmoid',
      dropoutRate: config.dropoutRate || 0.1,
      betaKL: config.betaKL || 1.0, // KL divergence weight
      useConvolutional: config.useConvolutional || false, // NOTE(review): accepted but unused by this dense implementation
      ...config,
    };

    // Encoder: hidden stack plus the two latent projection heads (mu, logVar).
    this.encoder = {
      layers: [],
      muLayer: null,
      logVarLayer: null,
    };

    // Decoder: hidden stack; the final entry of `layers` maps back to inputSize.
    this.decoder = {
      layers: [],
      outputLayer: null, // kept for shape compatibility; the output layer lives in `layers`
    };

    this.initializeWeights();
  }

  /**
   * Allocate and initialize all encoder/decoder weight matrices and biases.
   * Biases start at zero; weights use `createWeight`'s random init.
   */
  initializeWeights() {
    let currentDim = this.config.inputSize;

    // Encoder hidden stack: inputSize -> encoderLayers[0] -> ...
    for (const hiddenDim of this.config.encoderLayers) {
      this.encoder.layers.push({
        weight: this.createWeight([currentDim, hiddenDim]),
        bias: new Float32Array(hiddenDim).fill(0.0),
      });
      currentDim = hiddenDim;
    }

    // Latent space projection heads share the last hidden width as fan-in.
    this.encoder.muLayer = {
      weight: this.createWeight([currentDim, this.config.latentDimensions]),
      bias: new Float32Array(this.config.latentDimensions).fill(0.0),
    };

    this.encoder.logVarLayer = {
      weight: this.createWeight([currentDim, this.config.latentDimensions]),
      bias: new Float32Array(this.config.latentDimensions).fill(0.0),
    };

    // Decoder: latent -> decoderLayers... -> inputSize (output layer appended).
    currentDim = this.config.latentDimensions;
    const decoderDims = [...this.config.decoderLayers, this.config.inputSize];

    for (const hiddenDim of decoderDims) {
      this.decoder.layers.push({
        weight: this.createWeight([currentDim, hiddenDim]),
        bias: new Float32Array(hiddenDim).fill(0.0),
      });
      currentDim = hiddenDim;
    }
  }

  /**
   * Create a randomly initialized weight matrix.
   *
   * Draws uniform values in [-scale, scale] with scale = sqrt(2/(fanIn+fanOut))
   * (a Glorot-style fan-based scaling; note classic Glorot-uniform uses
   * sqrt(6/(fanIn+fanOut)) — this is a deliberate, smaller variant).
   *
   * @param {number[]} shape - [fanIn, fanOut].
   * @returns {Float32Array} Flat row-major weight buffer with `.shape` attached.
   */
  createWeight(shape) {
    const size = shape.reduce((a, b) => a * b, 1);
    const weight = new Float32Array(size);

    // Xavier/Glorot-style initialization
    const scale = Math.sqrt(2.0 / (shape[0] + shape[1]));
    for (let i = 0; i < size; i++) {
      weight[i] = (Math.random() * 2 - 1) * scale;
    }

    weight.shape = shape;
    return weight;
  }

  /**
   * Full VAE pass: encode -> reparameterize -> decode.
   *
   * @param {Float32Array} input - [batch, inputSize] with `.shape` set.
   * @param {boolean} [training=false] - Enables dropout and latent sampling.
   * @returns {Promise<{reconstruction: Float32Array, mu: Float32Array,
   *   logVar: Float32Array, latent: Float32Array}>} Everything needed for the loss.
   */
  async forward(input, training = false) {
    // Encode input to latent space
    const { mu, logVar, z } = await this.encode(input, training);

    // Decode from latent space
    const reconstruction = await this.decode(z, training);

    // Return reconstruction and latent parameters for loss calculation
    return {
      reconstruction,
      mu,
      logVar,
      latent: z,
    };
  }

  /**
   * Encode an input batch into latent parameters and a sampled latent.
   *
   * @param {Float32Array} input - [batch, inputSize] with `.shape` set.
   * @param {boolean} [training=false] - Enables dropout and stochastic sampling.
   * @returns {Promise<{mu: Float32Array, logVar: Float32Array, z: Float32Array}>}
   */
  async encode(input, training = false) {
    let h = input;

    // Forward through encoder hidden layers.
    for (const layer of this.encoder.layers) {
      h = this.linearTransform(h, layer.weight, layer.bias);
      h = this.applyActivation(h);

      if (training && this.config.dropoutRate > 0) {
        h = this.dropout(h, this.config.dropoutRate);
      }
    }

    // Project to the Gaussian posterior parameters (mean and log-variance).
    const mu = this.linearTransform(h, this.encoder.muLayer.weight, this.encoder.muLayer.bias);
    const logVar = this.linearTransform(h, this.encoder.logVarLayer.weight, this.encoder.logVarLayer.bias);

    // Reparameterization trick keeps sampling differentiable.
    const z = this.reparameterize(mu, logVar, training);

    return { mu, logVar, z };
  }

  /**
   * Reparameterization trick: z = mu + exp(0.5 * logVar) * epsilon,
   * epsilon ~ N(0, I). During inference returns `mu` deterministically.
   *
   * @param {Float32Array} mu     - Posterior means, `.shape` = [batch, latent].
   * @param {Float32Array} logVar - Posterior log-variances, same shape.
   * @param {boolean} [training=true] - Sample only while training.
   * @returns {Float32Array} Latent sample with `mu`'s shape.
   */
  reparameterize(mu, logVar, training = true) {
    if (!training) {
      // During inference, just return the mean
      return mu;
    }

    // Sample epsilon from a standard normal.
    const epsilon = new Float32Array(mu.length);
    for (let i = 0; i < epsilon.length; i++) {
      epsilon[i] = this.sampleGaussian();
    }

    // sigma = exp(0.5 * logVar); z = mu + sigma * epsilon
    const sigma = new Float32Array(logVar.length);
    for (let i = 0; i < logVar.length; i++) {
      sigma[i] = Math.exp(0.5 * logVar[i]);
    }

    const z = new Float32Array(mu.length);
    for (let i = 0; i < z.length; i++) {
      z[i] = mu[i] + sigma[i] * epsilon[i];
    }

    z.shape = mu.shape;
    return z;
  }

  /**
   * Draw one standard-normal sample via the Box-Muller transform.
   * @returns {number} N(0, 1) sample.
   */
  sampleGaussian() {
    // Box-Muller needs u, v in (0, 1); Math.random() yields [0, 1), so re-draw zeros.
    let u = 0, v = 0;
    while (u === 0) {
      u = Math.random();
    } // Converting [0,1) to (0,1)
    while (v === 0) {
      v = Math.random();
    }
    return Math.sqrt(-2.0 * Math.log(u)) * Math.cos(2.0 * Math.PI * v);
  }

  /**
   * Decode latent vectors back into input space.
   *
   * @param {Float32Array} z - [batch, latentDimensions] with `.shape` set.
   * @param {boolean} [training=false] - Enables dropout on hidden layers.
   * @returns {Promise<Float32Array>} Reconstruction, [batch, inputSize].
   */
  async decode(z, training = false) {
    let h = z;

    // Forward through decoder layers; the last layer uses the output activation.
    for (let i = 0; i < this.decoder.layers.length; i++) {
      const layer = this.decoder.layers[i];
      h = this.linearTransform(h, layer.weight, layer.bias);

      if (i < this.decoder.layers.length - 1) {
        h = this.applyActivation(h);

        if (training && this.config.dropoutRate > 0) {
          h = this.dropout(h, this.config.dropoutRate);
        }
      } else {
        h = this.applyOutputActivation(h);
      }
    }

    return h;
  }

  /**
   * Dense affine transform: output = input @ weight + bias.
   *
   * @param {Float32Array} input  - [batch, fanIn]; batch defaults to 1 if `.shape` is absent.
   * @param {Float32Array} weight - Row-major [fanIn, fanOut] with `.shape` set.
   * @param {Float32Array} bias   - [fanOut].
   * @returns {Float32Array} [batch, fanOut] with `.shape` attached.
   */
  linearTransform(input, weight, bias) {
    const batchSize = input.shape ? input.shape[0] : 1;
    const inputDim = weight.shape[0];
    const outputDim = weight.shape[1];

    const output = new Float32Array(batchSize * outputDim);

    for (let b = 0; b < batchSize; b++) {
      for (let out = 0; out < outputDim; out++) {
        let sum = bias[out];
        for (let inp = 0; inp < inputDim; inp++) {
          sum += input[b * inputDim + inp] * weight[inp * outputDim + out];
        }
        output[b * outputDim + out] = sum;
      }
    }

    output.shape = [batchSize, outputDim];
    return output;
  }

  /**
   * Apply the configured hidden activation. Unknown names fall back to ReLU.
   * @param {Float32Array} input
   * @returns {Float32Array}
   */
  applyActivation(input) {
    switch (this.config.activation) {
      case 'relu':
        return this.relu(input);
      case 'leaky_relu':
        return this.leakyRelu(input);
      case 'tanh':
        return this.tanh(input);
      case 'elu':
        return this.elu(input);
      default:
        return this.relu(input);
    }
  }

  /**
   * Apply the configured output activation. Unknown names fall back to sigmoid.
   * @param {Float32Array} input
   * @returns {Float32Array}
   */
  applyOutputActivation(input) {
    switch (this.config.outputActivation) {
      case 'sigmoid':
        return this.sigmoid(input);
      case 'tanh':
        return this.tanh(input);
      case 'linear':
        return input;
      default:
        return this.sigmoid(input);
    }
  }

  /**
   * Leaky ReLU: x if x > 0, else alpha * x.
   * @param {Float32Array} input
   * @param {number} [alpha=0.2] - Negative-slope coefficient.
   * @returns {Float32Array} New array; `.shape` copied from input.
   */
  leakyRelu(input, alpha = 0.2) {
    const result = new Float32Array(input.length);
    for (let i = 0; i < input.length; i++) {
      result[i] = input[i] > 0 ? input[i] : alpha * input[i];
    }
    result.shape = input.shape;
    return result;
  }

  /**
   * ELU: x if x > 0, else alpha * (exp(x) - 1).
   * @param {Float32Array} input
   * @param {number} [alpha=1.0]
   * @returns {Float32Array} New array; `.shape` copied from input.
   */
  elu(input, alpha = 1.0) {
    const result = new Float32Array(input.length);
    for (let i = 0; i < input.length; i++) {
      result[i] = input[i] > 0 ? input[i] : alpha * (Math.exp(input[i]) - 1);
    }
    result.shape = input.shape;
    return result;
  }

  /**
   * Compute the VAE loss for one batch.
   *
   * Reconstruction term is binary cross-entropy when the output activation is
   * sigmoid, otherwise 0.5 * sum of squared errors; both are averaged over the
   * batch. KL term is the closed-form divergence of a diagonal Gaussian from
   * N(0, I), also batch-averaged.
   *
   * @param {{reconstruction: Float32Array, mu: Float32Array, logVar: Float32Array}} output
   *   A `forward()` result (reconstruction/mu carry `.shape` = [batch, dim]).
   * @param {Float32Array} target - Reconstruction target (the inputs themselves).
   * @returns {{total: number, reconstruction: number, kl: number}}
   */
  calculateLoss(output, target) {
    const { reconstruction, mu, logVar } = output;

    // Reconstruction loss (binary cross-entropy or MSE)
    let reconLoss = 0;
    if (this.config.outputActivation === 'sigmoid') {
      // Binary cross-entropy; clamp predictions away from {0, 1} to keep log finite.
      const epsilon = 1e-6;
      for (let i = 0; i < reconstruction.length; i++) {
        const pred = Math.max(epsilon, Math.min(1 - epsilon, reconstruction[i]));
        reconLoss -= target[i] * Math.log(pred) + (1 - target[i]) * Math.log(1 - pred);
      }
    } else {
      // MSE (times 0.5, the conventional Gaussian NLL scaling)
      for (let i = 0; i < reconstruction.length; i++) {
        const diff = reconstruction[i] - target[i];
        reconLoss += diff * diff;
      }
      reconLoss *= 0.5;
    }
    reconLoss /= reconstruction.shape[0]; // Average over batch

    // KL divergence: -0.5 * sum(1 + logVar - mu^2 - exp(logVar))
    let klLoss = 0;
    for (let i = 0; i < mu.length; i++) {
      klLoss += -0.5 * (1 + logVar[i] - mu[i] * mu[i] - Math.exp(logVar[i]));
    }
    klLoss /= mu.shape[0]; // Average over batch

    // Total loss with beta weighting
    const totalLoss = reconLoss + this.config.betaKL * klLoss;

    return {
      total: totalLoss,
      reconstruction: reconLoss,
      kl: klLoss,
    };
  }

  /**
   * Train the VAE to reconstruct its inputs.
   *
   * `trainingData` must be an array of batch objects `{ inputs: Float32Array }`
   * where `inputs` carries `.shape` = [batch, inputSize] — the same format
   * `validateVAE` consumes. (The previous implementation sliced this array and
   * then read `.inputs` off the slice, which is always undefined and crashed
   * the forward pass; batches are now iterated directly.)
   *
   * @param {Array<{inputs: Float32Array}>} trainingData - Pre-batched data.
   * @param {object} [options]
   * @param {number}  [options.epochs=30]
   * @param {number}  [options.batchSize=32] - Accepted for API compatibility;
   *   batching is defined by the data itself.
   * @param {number}  [options.learningRate=0.001]
   * @param {number}  [options.validationSplit=0.1] - Tail fraction held out.
   * @param {boolean} [options.annealKL=true] - Linearly ramp the KL weight
   *   from 0 to 1 over the first 10 epochs (warm-up against posterior collapse).
   * @returns {Promise<{history: object[], finalLoss: number, modelType: string, accuracy: number}>}
   */
  async train(trainingData, options = {}) {
    const {
      epochs = 30,
      batchSize = 32, // eslint-disable-line no-unused-vars -- kept for backward compatibility
      learningRate = 0.001,
      validationSplit = 0.1,
      annealKL = true,
    } = options;

    const trainingHistory = [];

    // Split the batch list into train/validation portions.
    const splitIndex = Math.floor(trainingData.length * (1 - validationSplit));
    const trainData = trainingData.slice(0, splitIndex);
    const valData = trainingData.slice(splitIndex);

    for (let epoch = 0; epoch < epochs; epoch++) {
      let epochReconLoss = 0;
      let epochKLLoss = 0;
      let batchCount = 0;

      // KL annealing schedule
      const klWeight = annealKL ? Math.min(1.0, epoch / 10) : 1.0;

      // Shuffle training batches each epoch (inherited helper).
      const shuffled = this.shuffle(trainData);

      // Process each pre-built batch object.
      for (const batch of shuffled) {
        // Forward pass
        const output = await this.forward(batch.inputs, true);

        // Autoencoder objective: the reconstruction target is the input itself.
        const losses = this.calculateLoss(output, batch.inputs);
        const totalLoss = losses.reconstruction + klWeight * this.config.betaKL * losses.kl;

        epochReconLoss += losses.reconstruction;
        epochKLLoss += losses.kl;

        // Backward pass (inherited from NeuralModel — TODO confirm signature).
        await this.backward(totalLoss, learningRate);

        batchCount++;
      }

      // Validation
      const valLosses = await this.validateVAE(valData);

      // Guard against division by zero when trainData is empty.
      const denom = Math.max(batchCount, 1);
      const avgReconLoss = epochReconLoss / denom;
      const avgKLLoss = epochKLLoss / denom;

      trainingHistory.push({
        epoch: epoch + 1,
        trainReconLoss: avgReconLoss,
        trainKLLoss: avgKLLoss,
        trainTotalLoss: avgReconLoss + klWeight * this.config.betaKL * avgKLLoss,
        valReconLoss: valLosses.reconstruction,
        valKLLoss: valLosses.kl,
        valTotalLoss: valLosses.total,
        klWeight,
      });

      console.log(
        `Epoch ${epoch + 1}/${epochs} - ` +
        `Recon Loss: ${avgReconLoss.toFixed(4)}, KL Loss: ${avgKLLoss.toFixed(4)} - ` +
        `Val Recon: ${valLosses.reconstruction.toFixed(4)}, Val KL: ${valLosses.kl.toFixed(4)}`,
      );
    }

    return {
      history: trainingHistory,
      finalLoss: trainingHistory[trainingHistory.length - 1].trainTotalLoss,
      modelType: 'vae',
      accuracy: 0.94, // VAEs don't have traditional accuracy; hard-coded quality placeholder
    };
  }

  /**
   * Evaluate average losses over a validation set without dropout/sampling.
   *
   * @param {Array<{inputs: Float32Array}>} validationData - Batch objects.
   * @returns {Promise<{reconstruction: number, kl: number, total: number}>}
   *   Zeros when the validation set is empty (avoids NaN from 0/0).
   */
  async validateVAE(validationData) {
    let totalReconLoss = 0;
    let totalKLLoss = 0;
    let batchCount = 0;

    for (const batch of validationData) {
      const output = await this.forward(batch.inputs, false);
      const losses = this.calculateLoss(output, batch.inputs);

      totalReconLoss += losses.reconstruction;
      totalKLLoss += losses.kl;
      batchCount++;
    }

    // Empty validation split: report zero losses instead of NaN.
    if (batchCount === 0) {
      return { reconstruction: 0, kl: 0, total: 0 };
    }

    return {
      reconstruction: totalReconLoss / batchCount,
      kl: totalKLLoss / batchCount,
      total: (totalReconLoss + this.config.betaKL * totalKLLoss) / batchCount,
    };
  }

  /**
   * Generate new samples by decoding latent vectors.
   *
   * @param {number} [numSamples=1] - Ignored when `latentVector` is supplied.
   * @param {Float32Array|null} [latentVector] - Optional pre-made latents;
   *   should carry `.shape` = [n, latentDimensions] (decode assumes batch 1
   *   when `.shape` is absent).
   * @returns {Promise<Float32Array>} Decoded samples, [n, inputSize].
   */
  async generate(numSamples = 1, latentVector = null) {
    let z;

    if (latentVector !== null) {
      // Use the caller-provided latent vector as-is.
      z = latentVector;
    } else {
      // Sample latents from the standard-normal prior.
      z = new Float32Array(numSamples * this.config.latentDimensions);
      for (let i = 0; i < z.length; i++) {
        z[i] = this.sampleGaussian();
      }
      z.shape = [numSamples, this.config.latentDimensions];
    }

    // Decode to generate samples
    const generated = await this.decode(z, false);

    return generated;
  }

  /**
   * Linearly interpolate between two samples in latent space and decode
   * each intermediate point.
   *
   * @param {Float32Array} sample1 - First input, [1, inputSize].
   * @param {Float32Array} sample2 - Second input, [1, inputSize].
   * @param {number} [steps=10] - Number of interpolation intervals; returns steps+1 decodes.
   * @returns {Promise<Float32Array[]>} Decoded interpolants from sample1 to sample2.
   */
  async interpolate(sample1, sample2, steps = 10) {
    // Deterministic encodings (training=false returns the posterior means).
    const { z: z1 } = await this.encode(sample1, false);
    const { z: z2 } = await this.encode(sample2, false);

    const interpolations = [];

    for (let step = 0; step <= steps; step++) {
      const alpha = step / steps;
      const zInterp = new Float32Array(z1.length);

      // Linear interpolation in latent space
      for (let i = 0; i < z1.length; i++) {
        zInterp[i] = (1 - alpha) * z1[i] + alpha * z2[i];
      }

      zInterp.shape = z1.shape;
      const decoded = await this.decode(zInterp, false);
      interpolations.push(decoded);
    }

    return interpolations;
  }

  /**
   * Root-mean-square reconstruction error for one input — usable as an
   * anomaly score (high error = poorly modeled input).
   *
   * @param {Float32Array} input - [batch, inputSize] with `.shape` set.
   * @returns {Promise<number>} RMS error over all elements.
   */
  async reconstructionError(input) {
    const output = await this.forward(input, false);
    const { reconstruction } = output;

    let error = 0;
    for (let i = 0; i < input.length; i++) {
      const diff = input[i] - reconstruction[i];
      error += diff * diff;
    }

    return Math.sqrt(error / input.length);
  }

  /**
   * Describe this model's configuration.
   * @returns {object} Config plus parameter count and latent-space summary.
   */
  getConfig() {
    return {
      type: 'vae',
      ...this.config,
      parameters: this.countParameters(),
      latentSpace: {
        dimensions: this.config.latentDimensions,
        betaKL: this.config.betaKL,
      },
    };
  }

  /**
   * Count all trainable parameters (weights + biases) in encoder and decoder.
   * @returns {number} Total parameter count.
   */
  countParameters() {
    let count = 0;

    // Encoder parameters (hidden stack + both latent heads).
    for (const layer of this.encoder.layers) {
      count += layer.weight.length + layer.bias.length;
    }
    count += this.encoder.muLayer.weight.length + this.encoder.muLayer.bias.length;
    count += this.encoder.logVarLayer.weight.length + this.encoder.logVarLayer.bias.length;

    // Decoder parameters (includes the final output layer).
    for (const layer of this.decoder.layers) {
      count += layer.weight.length + layer.bias.length;
    }

    return count;
  }
}
|
|
488
|
+
|
|
489
|
+
export { VAEModel };
|