@sparkleideas/ruv-swarm 1.0.18-patch.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1565 -0
- package/bin/ruv-swarm-clean.js +1872 -0
- package/bin/ruv-swarm-memory.js +119 -0
- package/bin/ruv-swarm-secure-heartbeat.js +1549 -0
- package/bin/ruv-swarm-secure.js +1689 -0
- package/package.json +221 -0
- package/src/agent.ts +342 -0
- package/src/benchmark.js +267 -0
- package/src/claude-flow-enhanced.js +839 -0
- package/src/claude-integration/advanced-commands.js +561 -0
- package/src/claude-integration/core.js +112 -0
- package/src/claude-integration/docs.js +1548 -0
- package/src/claude-integration/env-template.js +39 -0
- package/src/claude-integration/index.js +209 -0
- package/src/claude-integration/remote.js +408 -0
- package/src/cli-diagnostics.js +364 -0
- package/src/cognitive-pattern-evolution.js +1317 -0
- package/src/daa-cognition.js +977 -0
- package/src/daa-service.d.ts +298 -0
- package/src/daa-service.js +1116 -0
- package/src/diagnostics.js +533 -0
- package/src/errors.js +528 -0
- package/src/github-coordinator/README.md +193 -0
- package/src/github-coordinator/claude-hooks.js +162 -0
- package/src/github-coordinator/gh-cli-coordinator.js +260 -0
- package/src/hooks/cli.js +82 -0
- package/src/hooks/index.js +1900 -0
- package/src/index-enhanced.d.ts +371 -0
- package/src/index-enhanced.js +734 -0
- package/src/index.d.ts +287 -0
- package/src/index.js +405 -0
- package/src/index.ts +457 -0
- package/src/logger.js +182 -0
- package/src/logging-config.js +179 -0
- package/src/mcp-daa-tools.js +735 -0
- package/src/mcp-tools-benchmarks.js +328 -0
- package/src/mcp-tools-enhanced.js +2863 -0
- package/src/memory-config.js +42 -0
- package/src/meta-learning-framework.js +1359 -0
- package/src/neural-agent.js +830 -0
- package/src/neural-coordination-protocol.js +1363 -0
- package/src/neural-models/README.md +118 -0
- package/src/neural-models/autoencoder.js +543 -0
- package/src/neural-models/base.js +269 -0
- package/src/neural-models/cnn.js +497 -0
- package/src/neural-models/gnn.js +447 -0
- package/src/neural-models/gru.js +536 -0
- package/src/neural-models/index.js +273 -0
- package/src/neural-models/lstm.js +551 -0
- package/src/neural-models/neural-presets-complete.js +1306 -0
- package/src/neural-models/presets/graph.js +392 -0
- package/src/neural-models/presets/index.js +279 -0
- package/src/neural-models/presets/nlp.js +328 -0
- package/src/neural-models/presets/timeseries.js +368 -0
- package/src/neural-models/presets/vision.js +387 -0
- package/src/neural-models/resnet.js +534 -0
- package/src/neural-models/transformer.js +515 -0
- package/src/neural-models/vae.js +489 -0
- package/src/neural-network-manager.js +1938 -0
- package/src/neural-network.ts +296 -0
- package/src/neural.js +574 -0
- package/src/performance-benchmarks.js +898 -0
- package/src/performance.js +458 -0
- package/src/persistence-pooled.js +695 -0
- package/src/persistence.js +480 -0
- package/src/schemas.js +864 -0
- package/src/security.js +218 -0
- package/src/singleton-container.js +183 -0
- package/src/sqlite-pool.js +587 -0
- package/src/sqlite-worker.js +141 -0
- package/src/types.ts +164 -0
- package/src/utils.ts +286 -0
- package/src/wasm-loader.js +601 -0
- package/src/wasm-loader2.js +404 -0
- package/src/wasm-memory-optimizer.js +783 -0
- package/src/wasm-types.d.ts +63 -0
- package/wasm/README.md +347 -0
- package/wasm/neuro-divergent.wasm +0 -0
- package/wasm/package.json +18 -0
- package/wasm/ruv-fann.wasm +0 -0
- package/wasm/ruv_swarm_simd.wasm +0 -0
- package/wasm/ruv_swarm_wasm.d.ts +391 -0
- package/wasm/ruv_swarm_wasm.js +2164 -0
- package/wasm/ruv_swarm_wasm_bg.wasm +0 -0
- package/wasm/ruv_swarm_wasm_bg.wasm.d.ts +123 -0
- package/wasm/wasm-bindings-loader.mjs +435 -0
- package/wasm/wasm-updates.md +684 -0
/**
 * Convolutional Neural Network (CNN) Model
 * For pattern recognition and image processing tasks.
 *
 * Data layout: all tensors are flat Float32Arrays in NHWC order
 * (batch, height, width, channels) with a `.shape` array attached.
 * Activation/loss helpers (`relu`, `dropout`, `softmax` aside, plus
 * `shuffle`, `crossEntropyLoss`, `backward`, `updateMetrics`) are
 * expected from the NeuralModel base class — confirm in ./base.js.
 */

import { NeuralModel } from './base.js';

class CNNModel extends NeuralModel {
  /**
   * @param {object} [config] - Model configuration.
   * @param {number[]} [config.inputShape=[28,28,1]] - [height, width, channels].
   * @param {object[]} [config.convLayers] - Per-layer {filters, kernelSize, stride, padding, activation}.
   * @param {number} [config.poolingSize=2] - Max-pool window size (<=1 disables pooling).
   * @param {number[]} [config.denseLayers=[128,64]] - Hidden dense layer widths.
   * @param {number} [config.outputSize=10] - Number of output classes.
   * @param {number} [config.dropoutRate=0.5] - Dropout probability during training.
   */
  constructor(config = {}) {
    super('cnn');

    // CNN configuration. The trailing spread lets callers override any key,
    // including ones already defaulted above.
    this.config = {
      inputShape: config.inputShape || [28, 28, 1], // [height, width, channels]
      convLayers: config.convLayers || [
        { filters: 32, kernelSize: 3, stride: 1, padding: 'same', activation: 'relu' },
        { filters: 64, kernelSize: 3, stride: 1, padding: 'same', activation: 'relu' },
        { filters: 128, kernelSize: 3, stride: 1, padding: 'same', activation: 'relu' },
      ],
      poolingSize: config.poolingSize || 2,
      denseLayers: config.denseLayers || [128, 64],
      outputSize: config.outputSize || 10,
      dropoutRate: config.dropoutRate || 0.5,
      ...config,
    };

    // Parameter storage, filled in by initializeWeights().
    this.convWeights = [];
    this.convBiases = [];
    this.denseWeights = [];
    this.denseBiases = [];

    this.initializeWeights();
  }

  /**
   * Allocate and randomly initialize all conv and dense parameters,
   * tracking the activation shape layer-by-layer to size the first
   * dense layer after flattening.
   */
  initializeWeights() {
    let currentShape = [...this.config.inputShape];

    // Initialize convolutional layers
    for (const convLayer of this.config.convLayers) {
      const { filters, kernelSize } = convLayer;
      const inputChannels = currentShape[2];

      // Kernel weights stored flat in [kernelSize, kernelSize, inputChannels, filters] order
      const kernelWeights = this.createWeight([
        kernelSize,
        kernelSize,
        inputChannels,
        filters,
      ]);

      this.convWeights.push({
        kernel: kernelWeights,
        shape: [kernelSize, kernelSize, inputChannels, filters],
      });

      // One bias per output filter
      this.convBiases.push(new Float32Array(filters).fill(0));

      // Update shape for next layer
      currentShape = this.getConvOutputShape(currentShape, convLayer);

      // Account for the max-pool that forward() applies after every conv layer
      if (this.config.poolingSize > 1) {
        currentShape = [
          Math.floor(currentShape[0] / this.config.poolingSize),
          Math.floor(currentShape[1] / this.config.poolingSize),
          currentShape[2],
        ];
      }
    }

    // Flattened feature size feeding the first dense layer
    const flattenedSize = currentShape.reduce((a, b) => a * b, 1);

    // Hidden dense layers
    let lastSize = flattenedSize;
    for (const units of this.config.denseLayers) {
      this.denseWeights.push(this.createWeight([lastSize, units]));
      this.denseBiases.push(new Float32Array(units).fill(0));
      lastSize = units;
    }

    // Output (classification) layer
    this.denseWeights.push(this.createWeight([lastSize, this.config.outputSize]));
    this.denseBiases.push(new Float32Array(this.config.outputSize).fill(0));
  }

  /**
   * Create a flat weight tensor for the given shape.
   * NOTE(review): labeled He initialization, but samples uniformly in
   * [-sqrt(2/fanIn), +sqrt(2/fanIn)] rather than from a Gaussian — an
   * approximation; confirm intent before changing.
   * @param {number[]} shape - Tensor dimensions; last dim is fan-out.
   * @returns {Float32Array} Flat initialized weights.
   */
  createWeight(shape) {
    const size = shape.reduce((a, b) => a * b, 1);
    const weight = new Float32Array(size);

    // He-style scale for ReLU activations: sqrt(2 / fanIn)
    const fanIn = shape.slice(0, -1).reduce((a, b) => a * b, 1);
    const scale = Math.sqrt(2.0 / fanIn);

    for (let i = 0; i < size; i++) {
      weight[i] = (Math.random() * 2 - 1) * scale;
    }

    return weight;
  }

  /**
   * Compute the spatial output shape of a conv layer.
   * @param {number[]} inputShape - [height, width, channels].
   * @param {object} convLayer - {filters, kernelSize, stride, padding}.
   * @returns {number[]} [outputHeight, outputWidth, filters].
   */
  getConvOutputShape(inputShape, convLayer) {
    const [height, width, channels] = inputShape;
    const { filters, kernelSize, stride = 1, padding } = convLayer;

    let outputHeight, outputWidth;

    if (padding === 'same') {
      // 'same' padding preserves spatial size (divided by stride)
      outputHeight = Math.ceil(height / stride);
      outputWidth = Math.ceil(width / stride);
    } else {
      // 'valid' padding: only full kernel placements
      outputHeight = Math.floor((height - kernelSize) / stride) + 1;
      outputWidth = Math.floor((width - kernelSize) / stride) + 1;
    }

    return [outputHeight, outputWidth, filters];
  }

  /**
   * Forward pass: conv(+relu)(+pool) stack, flatten, dense(+relu)(+dropout)
   * stack, final dense layer, softmax.
   * @param {Float32Array} input - NHWC batch with `.shape` = [B, H, W, C].
   * @param {boolean} [training=false] - Enables dropout when true.
   * @returns {Promise<Float32Array>} Softmax class probabilities, shape [B, outputSize].
   */
  async forward(input, training = false) {
    let x = input;

    // Convolutional feature extractor
    for (let i = 0; i < this.config.convLayers.length; i++) {
      x = this.conv2d(x, i);

      // Per-layer activation (only 'relu' is implemented)
      const { activation } = this.config.convLayers[i];
      if (activation === 'relu') {
        x = this.relu(x);
      }

      // Spatial downsampling after every conv layer
      if (this.config.poolingSize > 1) {
        x = this.maxPool2d(x, this.config.poolingSize);
      }
    }

    // Flatten to [batch, features]
    x = this.flatten(x);

    // Dense classifier head
    for (let i = 0; i < this.config.denseLayers.length; i++) {
      x = this.dense(x, this.denseWeights[i], this.denseBiases[i]);
      x = this.relu(x);

      // Dropout only during training
      if (training && this.config.dropoutRate > 0) {
        x = this.dropout(x, this.config.dropoutRate);
      }
    }

    // Output layer (last entry in denseWeights)
    const outputIndex = this.denseWeights.length - 1;
    x = this.dense(x, this.denseWeights[outputIndex], this.denseBiases[outputIndex]);

    // Class probabilities
    x = this.softmax(x);

    return x;
  }

  /**
   * Naive direct 2D convolution for one layer (NHWC, nested loops).
   * 'same' padding is emulated by shifting the receptive field by
   * floor(kernelSize/2) and zero-skipping out-of-bounds taps.
   * @param {Float32Array} input - NHWC batch with `.shape`.
   * @param {number} layerIndex - Index into config.convLayers / weights.
   * @returns {Float32Array} NHWC output with `.shape` attached.
   */
  conv2d(input, layerIndex) {
    const convLayer = this.config.convLayers[layerIndex];
    const weights = this.convWeights[layerIndex];
    const biases = this.convBiases[layerIndex];

    const [batchSize, height, width, inputChannels] = input.shape;
    const { filters, kernelSize, stride = 1, padding } = convLayer;

    // Calculate output dimensions
    const outputShape = this.getConvOutputShape([height, width, inputChannels], convLayer);
    const [outputHeight, outputWidth, outputChannels] = outputShape;

    const output = new Float32Array(batchSize * outputHeight * outputWidth * outputChannels);

    // Apply convolution
    for (let b = 0; b < batchSize; b++) {
      for (let oh = 0; oh < outputHeight; oh++) {
        for (let ow = 0; ow < outputWidth; ow++) {
          for (let oc = 0; oc < outputChannels; oc++) {
            let sum = biases[oc];

            // Accumulate over the kernel window and input channels
            for (let kh = 0; kh < kernelSize; kh++) {
              for (let kw = 0; kw < kernelSize; kw++) {
                for (let ic = 0; ic < inputChannels; ic++) {
                  let ih = oh * stride + kh;
                  let iw = ow * stride + kw;

                  // Center the window for 'same' padding
                  if (padding === 'same') {
                    ih -= Math.floor(kernelSize / 2);
                    iw -= Math.floor(kernelSize / 2);
                  }

                  // Out-of-bounds taps contribute zero (implicit zero padding)
                  if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
                    const inputIdx = b * height * width * inputChannels +
                      ih * width * inputChannels +
                      iw * inputChannels + ic;

                    const weightIdx = kh * kernelSize * inputChannels * filters +
                      kw * inputChannels * filters +
                      ic * filters + oc;

                    sum += input[inputIdx] * weights.kernel[weightIdx];
                  }
                }
              }
            }

            const outputIdx = b * outputHeight * outputWidth * outputChannels +
              oh * outputWidth * outputChannels +
              ow * outputChannels + oc;

            output[outputIdx] = sum;
          }
        }
      }
    }

    output.shape = [batchSize, outputHeight, outputWidth, outputChannels];
    return output;
  }

  /**
   * Max pooling with a square window and stride equal to the window size.
   * Trailing rows/columns that don't fill a full window are dropped.
   * @param {Float32Array} input - NHWC batch with `.shape`.
   * @param {number} poolSize - Pool window edge length.
   * @returns {Float32Array} Pooled NHWC output with `.shape` attached.
   */
  maxPool2d(input, poolSize) {
    const [batchSize, height, width, channels] = input.shape;
    const outputHeight = Math.floor(height / poolSize);
    const outputWidth = Math.floor(width / poolSize);

    const output = new Float32Array(batchSize * outputHeight * outputWidth * channels);

    for (let b = 0; b < batchSize; b++) {
      for (let oh = 0; oh < outputHeight; oh++) {
        for (let ow = 0; ow < outputWidth; ow++) {
          for (let c = 0; c < channels; c++) {
            let maxVal = -Infinity;

            // Find max in pool window
            for (let ph = 0; ph < poolSize; ph++) {
              for (let pw = 0; pw < poolSize; pw++) {
                const ih = oh * poolSize + ph;
                const iw = ow * poolSize + pw;

                if (ih < height && iw < width) {
                  const inputIdx = b * height * width * channels +
                    ih * width * channels +
                    iw * channels + c;

                  maxVal = Math.max(maxVal, input[inputIdx]);
                }
              }
            }

            const outputIdx = b * outputHeight * outputWidth * channels +
              oh * outputWidth * channels +
              ow * channels + c;

            output[outputIdx] = maxVal;
          }
        }
      }
    }

    output.shape = [batchSize, outputHeight, outputWidth, channels];
    return output;
  }

  /**
   * Collapse all non-batch dimensions into one. The underlying buffer is
   * already row-major, so this is a straight element copy plus a reshape.
   * @param {Float32Array} input - Tensor with `.shape` = [B, ...dims].
   * @returns {Float32Array} Tensor with `.shape` = [B, prod(dims)].
   */
  flatten(input) {
    const [batchSize, ...dims] = input.shape;
    const flatSize = dims.reduce((a, b) => a * b, 1);

    const output = new Float32Array(batchSize * flatSize);

    // Copy data in flattened order
    for (let i = 0; i < output.length; i++) {
      output[i] = input[i];
    }

    output.shape = [batchSize, flatSize];
    return output;
  }

  /**
   * Fully-connected layer: output = input · weights + biases.
   * Weights are flat in [inputSize, outputSize] row-major order.
   * @param {Float32Array} input - `.shape` = [B, inputSize].
   * @param {Float32Array} weights - Flat [inputSize, outputSize].
   * @param {Float32Array} biases - Length outputSize.
   * @returns {Float32Array} `.shape` = [B, outputSize].
   */
  dense(input, weights, biases) {
    const [batchSize, inputSize] = input.shape;
    const outputSize = biases.length;

    const output = new Float32Array(batchSize * outputSize);

    for (let b = 0; b < batchSize; b++) {
      for (let o = 0; o < outputSize; o++) {
        let sum = biases[o];

        for (let i = 0; i < inputSize; i++) {
          sum += input[b * inputSize + i] * weights[i * outputSize + o];
        }

        output[b * outputSize + o] = sum;
      }
    }

    output.shape = [batchSize, outputSize];
    return output;
  }

  /**
   * Row-wise softmax with max-subtraction for numerical stability.
   * @param {Float32Array} input - `.shape` = [B, size] of logits.
   * @returns {Float32Array} Probabilities, same shape; each row sums to 1.
   */
  softmax(input) {
    const [batchSize, size] = input.shape;
    const output = new Float32Array(input.length);

    for (let b = 0; b < batchSize; b++) {
      const offset = b * size;
      let maxVal = -Infinity;

      // Find max for numerical stability
      for (let i = 0; i < size; i++) {
        maxVal = Math.max(maxVal, input[offset + i]);
      }

      // Compute exp and sum
      let sumExp = 0;
      for (let i = 0; i < size; i++) {
        output[offset + i] = Math.exp(input[offset + i] - maxVal);
        sumExp += output[offset + i];
      }

      // Normalize
      for (let i = 0; i < size; i++) {
        output[offset + i] /= sumExp;
      }
    }

    output.shape = input.shape;
    return output;
  }

  /**
   * Train with mini-batch SGD-style epochs and a held-out validation split.
   * Relies on `shuffle`, `crossEntropyLoss`, `calculateAccuracy`, `backward`
   * and `updateMetrics` (backward/shuffle/loss presumably from NeuralModel —
   * confirm in ./base.js).
   * @param {Array} trainingData - Training samples; tail fraction used for validation.
   * @param {object} [options] - {epochs=10, batchSize=32, learningRate=0.001, validationSplit=0.1}.
   * @returns {Promise<object>} {history, finalLoss, finalAccuracy, modelType}.
   */
  async train(trainingData, options = {}) {
    const {
      epochs = 10,
      batchSize = 32,
      learningRate = 0.001,
      validationSplit = 0.1,
    } = options;

    const trainingHistory = [];

    // Split data
    const splitIndex = Math.floor(trainingData.length * (1 - validationSplit));
    const trainData = trainingData.slice(0, splitIndex);
    const valData = trainingData.slice(splitIndex);

    for (let epoch = 0; epoch < epochs; epoch++) {
      let epochLoss = 0;
      let epochAccuracy = 0;
      let batchCount = 0;

      // Shuffle training data
      const shuffled = this.shuffle(trainData);

      // Process batches
      for (let i = 0; i < shuffled.length; i += batchSize) {
        const batch = shuffled.slice(i, Math.min(i + batchSize, shuffled.length));

        // NOTE(review): `batch` is an Array slice, yet `.inputs`/`.targets`
        // are read off it below — evaluate() instead treats each element of
        // its data as a {inputs, targets} batch. One of the two data formats
        // looks wrong; confirm the caller's sample layout before relying on
        // training results.
        // Forward pass
        const predictions = await this.forward(batch.inputs, true);

        // Calculate loss and accuracy
        const loss = this.crossEntropyLoss(predictions, batch.targets);
        const accuracy = this.calculateAccuracy(predictions, batch.targets);

        epochLoss += loss;
        epochAccuracy += accuracy;

        // Backward pass (simplified)
        await this.backward(loss, learningRate);

        batchCount++;
      }

      // Validation
      const valMetrics = await this.evaluate(valData);

      const avgTrainLoss = epochLoss / batchCount;
      const avgTrainAccuracy = epochAccuracy / batchCount;

      trainingHistory.push({
        epoch: epoch + 1,
        trainLoss: avgTrainLoss,
        trainAccuracy: avgTrainAccuracy,
        valLoss: valMetrics.loss,
        valAccuracy: valMetrics.accuracy,
      });

      console.log(
        `Epoch ${epoch + 1}/${epochs} - ` +
        `Train Loss: ${avgTrainLoss.toFixed(4)}, ` +
        `Train Acc: ${(avgTrainAccuracy * 100).toFixed(2)}%, ` +
        `Val Loss: ${valMetrics.loss.toFixed(4)}, ` +
        `Val Acc: ${(valMetrics.accuracy * 100).toFixed(2)}%`,
      );

      this.updateMetrics(avgTrainLoss, avgTrainAccuracy);
    }

    // Guard: with epochs === 0 the history is empty; avoid a TypeError on
    // the unconditional last-element access the original code performed.
    const last = trainingHistory[trainingHistory.length - 1] ?? {
      trainLoss: null,
      trainAccuracy: null,
    };

    return {
      history: trainingHistory,
      finalLoss: last.trainLoss,
      finalAccuracy: last.trainAccuracy,
      modelType: 'cnn',
    };
  }

  /**
   * Evaluate average loss/accuracy over pre-batched data.
   * @param {Array<{inputs: Float32Array, targets: Float32Array}>} data - Batches.
   * @returns {Promise<{loss: number, accuracy: number}>} Averages (0/0 for empty data).
   */
  async evaluate(data) {
    let totalLoss = 0;
    let totalAccuracy = 0;
    let batchCount = 0;

    for (const batch of data) {
      const predictions = await this.forward(batch.inputs, false);
      const loss = this.crossEntropyLoss(predictions, batch.targets);
      const accuracy = this.calculateAccuracy(predictions, batch.targets);

      totalLoss += loss;
      totalAccuracy += accuracy;
      batchCount++;
    }

    // Guard: empty data previously yielded NaN/NaN via division by zero.
    if (batchCount === 0) {
      return { loss: 0, accuracy: 0 };
    }

    return {
      loss: totalLoss / batchCount,
      accuracy: totalAccuracy / batchCount,
    };
  }

  /**
   * Fraction of samples whose argmax prediction matches the one-hot target.
   * @param {Float32Array} predictions - `.shape` = [B, numClasses] probabilities.
   * @param {Float32Array|number[]} targets - One-hot targets, same layout.
   * @returns {number} Accuracy in [0, 1].
   */
  calculateAccuracy(predictions, targets) {
    let correct = 0;
    const batchSize = predictions.shape[0];
    const numClasses = predictions.shape[1];

    for (let b = 0; b < batchSize; b++) {
      let predClass = 0;
      let maxProb = -Infinity;

      // Argmax over predicted probabilities
      for (let c = 0; c < numClasses; c++) {
        const prob = predictions[b * numClasses + c];
        if (prob > maxProb) {
          maxProb = prob;
          predClass = c;
        }
      }

      // First class whose one-hot entry is exactly 1
      let trueClass = 0;
      for (let c = 0; c < numClasses; c++) {
        if (targets[b * numClasses + c] === 1) {
          trueClass = c;
          break;
        }
      }

      if (predClass === trueClass) {
        correct++;
      }
    }

    return correct / batchSize;
  }

  /**
   * @returns {object} The model config plus type tag and total parameter count.
   */
  getConfig() {
    return {
      type: 'cnn',
      ...this.config,
      parameters: this.countParameters(),
    };
  }

  /**
   * @returns {number} Total number of trainable parameters (weights + biases).
   */
  countParameters() {
    let count = 0;

    // Convolutional layers
    for (let i = 0; i < this.convWeights.length; i++) {
      count += this.convWeights[i].kernel.length;
      count += this.convBiases[i].length;
    }

    // Dense layers
    for (let i = 0; i < this.denseWeights.length; i++) {
      count += this.denseWeights[i].length;
      count += this.denseBiases[i].length;
    }

    return count;
  }
}

export { CNNModel };