@sparkleideas/ruv-swarm 1.0.18-patch.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87) hide show
  1. package/README.md +1565 -0
  2. package/bin/ruv-swarm-clean.js +1872 -0
  3. package/bin/ruv-swarm-memory.js +119 -0
  4. package/bin/ruv-swarm-secure-heartbeat.js +1549 -0
  5. package/bin/ruv-swarm-secure.js +1689 -0
  6. package/package.json +221 -0
  7. package/src/agent.ts +342 -0
  8. package/src/benchmark.js +267 -0
  9. package/src/claude-flow-enhanced.js +839 -0
  10. package/src/claude-integration/advanced-commands.js +561 -0
  11. package/src/claude-integration/core.js +112 -0
  12. package/src/claude-integration/docs.js +1548 -0
  13. package/src/claude-integration/env-template.js +39 -0
  14. package/src/claude-integration/index.js +209 -0
  15. package/src/claude-integration/remote.js +408 -0
  16. package/src/cli-diagnostics.js +364 -0
  17. package/src/cognitive-pattern-evolution.js +1317 -0
  18. package/src/daa-cognition.js +977 -0
  19. package/src/daa-service.d.ts +298 -0
  20. package/src/daa-service.js +1116 -0
  21. package/src/diagnostics.js +533 -0
  22. package/src/errors.js +528 -0
  23. package/src/github-coordinator/README.md +193 -0
  24. package/src/github-coordinator/claude-hooks.js +162 -0
  25. package/src/github-coordinator/gh-cli-coordinator.js +260 -0
  26. package/src/hooks/cli.js +82 -0
  27. package/src/hooks/index.js +1900 -0
  28. package/src/index-enhanced.d.ts +371 -0
  29. package/src/index-enhanced.js +734 -0
  30. package/src/index.d.ts +287 -0
  31. package/src/index.js +405 -0
  32. package/src/index.ts +457 -0
  33. package/src/logger.js +182 -0
  34. package/src/logging-config.js +179 -0
  35. package/src/mcp-daa-tools.js +735 -0
  36. package/src/mcp-tools-benchmarks.js +328 -0
  37. package/src/mcp-tools-enhanced.js +2863 -0
  38. package/src/memory-config.js +42 -0
  39. package/src/meta-learning-framework.js +1359 -0
  40. package/src/neural-agent.js +830 -0
  41. package/src/neural-coordination-protocol.js +1363 -0
  42. package/src/neural-models/README.md +118 -0
  43. package/src/neural-models/autoencoder.js +543 -0
  44. package/src/neural-models/base.js +269 -0
  45. package/src/neural-models/cnn.js +497 -0
  46. package/src/neural-models/gnn.js +447 -0
  47. package/src/neural-models/gru.js +536 -0
  48. package/src/neural-models/index.js +273 -0
  49. package/src/neural-models/lstm.js +551 -0
  50. package/src/neural-models/neural-presets-complete.js +1306 -0
  51. package/src/neural-models/presets/graph.js +392 -0
  52. package/src/neural-models/presets/index.js +279 -0
  53. package/src/neural-models/presets/nlp.js +328 -0
  54. package/src/neural-models/presets/timeseries.js +368 -0
  55. package/src/neural-models/presets/vision.js +387 -0
  56. package/src/neural-models/resnet.js +534 -0
  57. package/src/neural-models/transformer.js +515 -0
  58. package/src/neural-models/vae.js +489 -0
  59. package/src/neural-network-manager.js +1938 -0
  60. package/src/neural-network.ts +296 -0
  61. package/src/neural.js +574 -0
  62. package/src/performance-benchmarks.js +898 -0
  63. package/src/performance.js +458 -0
  64. package/src/persistence-pooled.js +695 -0
  65. package/src/persistence.js +480 -0
  66. package/src/schemas.js +864 -0
  67. package/src/security.js +218 -0
  68. package/src/singleton-container.js +183 -0
  69. package/src/sqlite-pool.js +587 -0
  70. package/src/sqlite-worker.js +141 -0
  71. package/src/types.ts +164 -0
  72. package/src/utils.ts +286 -0
  73. package/src/wasm-loader.js +601 -0
  74. package/src/wasm-loader2.js +404 -0
  75. package/src/wasm-memory-optimizer.js +783 -0
  76. package/src/wasm-types.d.ts +63 -0
  77. package/wasm/README.md +347 -0
  78. package/wasm/neuro-divergent.wasm +0 -0
  79. package/wasm/package.json +18 -0
  80. package/wasm/ruv-fann.wasm +0 -0
  81. package/wasm/ruv_swarm_simd.wasm +0 -0
  82. package/wasm/ruv_swarm_wasm.d.ts +391 -0
  83. package/wasm/ruv_swarm_wasm.js +2164 -0
  84. package/wasm/ruv_swarm_wasm_bg.wasm +0 -0
  85. package/wasm/ruv_swarm_wasm_bg.wasm.d.ts +123 -0
  86. package/wasm/wasm-bindings-loader.mjs +435 -0
  87. package/wasm/wasm-updates.md +684 -0
@@ -0,0 +1,534 @@
1
+ /**
2
+ * Residual Network (ResNet) Model
3
+ * Implements deep neural networks with skip connections
4
+ */
5
+
6
+ import { NeuralModel } from './base.js';
7
+
8
+ class ResNetModel extends NeuralModel {
9
+ constructor(config = {}) {
10
+ super('resnet');
11
+
12
+ // ResNet configuration
13
+ this.config = {
14
+ inputDimensions: config.inputDimensions || 784, // Default for flattened MNIST
15
+ numBlocks: config.numBlocks || 4,
16
+ blockDepth: config.blockDepth || 2,
17
+ hiddenDimensions: config.hiddenDimensions || 256,
18
+ outputDimensions: config.outputDimensions || 10,
19
+ activation: config.activation || 'relu',
20
+ batchNorm: config.batchNorm !== false, // Default true
21
+ dropoutRate: config.dropoutRate || 0.2,
22
+ initialChannels: config.initialChannels || 64,
23
+ ...config,
24
+ };
25
+
26
+ // Initialize layers
27
+ this.blocks = [];
28
+ this.batchNormParams = [];
29
+ this.skipConnections = [];
30
+ this.outputLayer = null;
31
+
32
+ this.initializeWeights();
33
+ }
34
+
35
+ initializeWeights() {
36
+ let currentDimensions = this.config.inputDimensions;
37
+
38
+ // Initial projection layer
39
+ this.inputProjection = {
40
+ weight: this.createWeight([currentDimensions, this.config.initialChannels]),
41
+ bias: new Float32Array(this.config.initialChannels).fill(0.0),
42
+ };
43
+ currentDimensions = this.config.initialChannels;
44
+
45
+ // Create residual blocks
46
+ for (let blockIdx = 0; blockIdx < this.config.numBlocks; blockIdx++) {
47
+ const block = [];
48
+ const blockBatchNorm = [];
49
+
50
+ // Determine block dimensions
51
+ const outputDim = Math.min(
52
+ currentDimensions * 2,
53
+ this.config.hiddenDimensions,
54
+ );
55
+
56
+ // Create layers within block
57
+ for (let layerIdx = 0; layerIdx < this.config.blockDepth; layerIdx++) {
58
+ const inputDim = layerIdx === 0 ? currentDimensions : outputDim;
59
+
60
+ block.push({
61
+ weight: this.createWeight([inputDim, outputDim]),
62
+ bias: new Float32Array(outputDim).fill(0.0),
63
+ });
64
+
65
+ if (this.config.batchNorm) {
66
+ blockBatchNorm.push({
67
+ gamma: new Float32Array(outputDim).fill(1.0),
68
+ beta: new Float32Array(outputDim).fill(0.0),
69
+ runningMean: new Float32Array(outputDim).fill(0.0),
70
+ runningVar: new Float32Array(outputDim).fill(1.0),
71
+ momentum: 0.9,
72
+ });
73
+ }
74
+ }
75
+
76
+ // Skip connection projection if dimensions change
77
+ if (currentDimensions !== outputDim) {
78
+ this.skipConnections.push({
79
+ weight: this.createWeight([currentDimensions, outputDim]),
80
+ bias: new Float32Array(outputDim).fill(0.0),
81
+ });
82
+ } else {
83
+ this.skipConnections.push(null); // Identity skip connection
84
+ }
85
+
86
+ this.blocks.push(block);
87
+ this.batchNormParams.push(blockBatchNorm);
88
+ currentDimensions = outputDim;
89
+ }
90
+
91
+ // Output layer
92
+ this.outputLayer = {
93
+ weight: this.createWeight([currentDimensions, this.config.outputDimensions]),
94
+ bias: new Float32Array(this.config.outputDimensions).fill(0.0),
95
+ };
96
+ }
97
+
98
+ createWeight(shape) {
99
+ const size = shape.reduce((a, b) => a * b, 1);
100
+ const weight = new Float32Array(size);
101
+
102
+ // He initialization for ReLU
103
+ const scale = Math.sqrt(2.0 / shape[0]);
104
+ for (let i = 0; i < size; i++) {
105
+ weight[i] = (Math.random() * 2 - 1) * scale;
106
+ }
107
+
108
+ weight.shape = shape;
109
+ return weight;
110
+ }
111
+
112
+ async forward(input, training = false) {
113
+ // Initial projection
114
+ let x = this.linearTransform(input, this.inputProjection.weight, this.inputProjection.bias);
115
+ x = this.applyActivation(x);
116
+
117
+ // Process through residual blocks
118
+ for (let blockIdx = 0; blockIdx < this.config.numBlocks; blockIdx++) {
119
+ x = await this.forwardBlock(x, blockIdx, training);
120
+ }
121
+
122
+ // Global average pooling (if input has spatial dimensions)
123
+ if (x.shape && x.shape.length > 2) {
124
+ x = this.globalAveragePooling(x);
125
+ }
126
+
127
+ // Final classification layer
128
+ const output = this.linearTransform(x, this.outputLayer.weight, this.outputLayer.bias);
129
+
130
+ return output;
131
+ }
132
+
133
+ async forwardBlock(input, blockIdx, training = false) {
134
+ const block = this.blocks[blockIdx];
135
+ const batchNorm = this.batchNormParams[blockIdx];
136
+ const skipConnection = this.skipConnections[blockIdx];
137
+
138
+ // Save input for skip connection
139
+ let identity = input;
140
+
141
+ // Apply skip connection projection if needed
142
+ if (skipConnection) {
143
+ identity = this.linearTransform(input, skipConnection.weight, skipConnection.bias);
144
+ }
145
+
146
+ // Forward through block layers
147
+ let x = input;
148
+ for (let layerIdx = 0; layerIdx < block.length; layerIdx++) {
149
+ const layer = block[layerIdx];
150
+
151
+ // Linear transformation
152
+ x = this.linearTransform(x, layer.weight, layer.bias);
153
+
154
+ // Batch normalization
155
+ if (this.config.batchNorm && batchNorm[layerIdx]) {
156
+ x = this.batchNormalize(x, batchNorm[layerIdx], training);
157
+ }
158
+
159
+ // Activation (except for last layer in block)
160
+ if (layerIdx < block.length - 1) {
161
+ x = this.applyActivation(x);
162
+ }
163
+
164
+ // Dropout if training
165
+ if (training && this.config.dropoutRate > 0 && layerIdx < block.length - 1) {
166
+ x = this.dropout(x, this.config.dropoutRate);
167
+ }
168
+ }
169
+
170
+ // Add skip connection
171
+ x = this.add(x, identity);
172
+
173
+ // Final activation
174
+ x = this.applyActivation(x);
175
+
176
+ return x;
177
+ }
178
+
179
+ linearTransform(input, weight, bias) {
180
+ const batchSize = input.shape ? input.shape[0] : 1;
181
+ const inputDim = weight.shape[0];
182
+ const outputDim = weight.shape[1];
183
+
184
+ const output = new Float32Array(batchSize * outputDim);
185
+
186
+ for (let b = 0; b < batchSize; b++) {
187
+ for (let out = 0; out < outputDim; out++) {
188
+ let sum = bias[out];
189
+ for (let inp = 0; inp < inputDim; inp++) {
190
+ sum += input[b * inputDim + inp] * weight[inp * outputDim + out];
191
+ }
192
+ output[b * outputDim + out] = sum;
193
+ }
194
+ }
195
+
196
+ output.shape = [batchSize, outputDim];
197
+ return output;
198
+ }
199
+
200
/**
 * Batch normalization over the last (feature) axis.
 *
 * In training mode, normalizes with the current batch's per-feature
 * mean/variance and updates `params.runningMean` / `params.runningVar`
 * in place via an exponential moving average (momentum weights the old
 * value). In inference mode, normalizes with the stored running stats.
 * Either way the result is scaled by gamma and shifted by beta.
 *
 * NOTE(review): batchSize = input.length / features assumes input.length is
 * an exact multiple of the feature count — confirm upstream callers.
 *
 * @param {Float32Array} input - Flat [batch, features] tensor.
 * @param {object} params - {gamma, beta, runningMean, runningVar, momentum};
 *   running stats are MUTATED when training is true.
 * @param {boolean} [training=false] - Batch stats vs. running stats.
 * @returns {Float32Array} normalized tensor, same `shape` as input.
 */
batchNormalize(input, params, training = false) {
  const shape = input.shape || [input.length];
  const features = shape[shape.length - 1];
  const batchSize = input.length / features;

  const normalized = new Float32Array(input.length);

  if (training) {
    // Per-feature batch statistics.
    const mean = new Float32Array(features);
    const variance = new Float32Array(features);

    // Mean over the batch for each feature.
    for (let f = 0; f < features; f++) {
      let sum = 0;
      for (let b = 0; b < batchSize; b++) {
        sum += input[b * features + f];
      }
      mean[f] = sum / batchSize;
    }

    // Biased (population) variance over the batch for each feature.
    for (let f = 0; f < features; f++) {
      let sum = 0;
      for (let b = 0; b < batchSize; b++) {
        const diff = input[b * features + f] - mean[f];
        sum += diff * diff;
      }
      variance[f] = sum / batchSize;
    }

    // EMA update of the running statistics used at inference time.
    for (let f = 0; f < features; f++) {
      params.runningMean[f] = params.momentum * params.runningMean[f] +
        (1 - params.momentum) * mean[f];
      params.runningVar[f] = params.momentum * params.runningVar[f] +
        (1 - params.momentum) * variance[f];
    }

    // Normalize with batch stats; 1e-5 guards against division by zero.
    for (let b = 0; b < batchSize; b++) {
      for (let f = 0; f < features; f++) {
        const idx = b * features + f;
        const norm = (input[idx] - mean[f]) / Math.sqrt(variance[f] + 1e-5);
        normalized[idx] = params.gamma[f] * norm + params.beta[f];
      }
    }
  } else {
    // Inference path: normalize with the running statistics.
    for (let b = 0; b < batchSize; b++) {
      for (let f = 0; f < features; f++) {
        const idx = b * features + f;
        const norm = (input[idx] - params.runningMean[f]) /
          Math.sqrt(params.runningVar[f] + 1e-5);
        normalized[idx] = params.gamma[f] * norm + params.beta[f];
      }
    }
  }

  normalized.shape = input.shape;
  return normalized;
}
262
+
263
+ applyActivation(input) {
264
+ switch (this.config.activation) {
265
+ case 'relu':
266
+ return this.relu(input);
267
+ case 'leaky_relu':
268
+ return this.leakyRelu(input);
269
+ case 'elu':
270
+ return this.elu(input);
271
+ case 'swish':
272
+ return this.swish(input);
273
+ default:
274
+ return this.relu(input);
275
+ }
276
+ }
277
+
278
+ leakyRelu(input, alpha = 0.01) {
279
+ const result = new Float32Array(input.length);
280
+ for (let i = 0; i < input.length; i++) {
281
+ result[i] = input[i] > 0 ? input[i] : alpha * input[i];
282
+ }
283
+ result.shape = input.shape;
284
+ return result;
285
+ }
286
+
287
+ elu(input, alpha = 1.0) {
288
+ const result = new Float32Array(input.length);
289
+ for (let i = 0; i < input.length; i++) {
290
+ result[i] = input[i] > 0 ? input[i] : alpha * (Math.exp(input[i]) - 1);
291
+ }
292
+ result.shape = input.shape;
293
+ return result;
294
+ }
295
+
296
+ swish(input) {
297
+ const result = new Float32Array(input.length);
298
+ for (let i = 0; i < input.length; i++) {
299
+ result[i] = input[i] * this.sigmoid([input[i]])[0];
300
+ }
301
+ result.shape = input.shape;
302
+ return result;
303
+ }
304
+
305
+ globalAveragePooling(input) {
306
+ // Assumes input shape is [batch, height, width, channels]
307
+ const { shape } = input;
308
+ const batchSize = shape[0];
309
+ const spatialSize = shape[1] * shape[2];
310
+ const channels = shape[3];
311
+
312
+ const pooled = new Float32Array(batchSize * channels);
313
+
314
+ for (let b = 0; b < batchSize; b++) {
315
+ for (let c = 0; c < channels; c++) {
316
+ let sum = 0;
317
+ for (let s = 0; s < spatialSize; s++) {
318
+ sum += input[b * spatialSize * channels + s * channels + c];
319
+ }
320
+ pooled[b * channels + c] = sum / spatialSize;
321
+ }
322
+ }
323
+
324
+ pooled.shape = [batchSize, channels];
325
+ return pooled;
326
+ }
327
+
328
+ async train(trainingData, options = {}) {
329
+ const {
330
+ epochs = 20,
331
+ batchSize = 32,
332
+ learningRate = 0.001,
333
+ weightDecay = 0.0001,
334
+ validationSplit = 0.1,
335
+ } = options;
336
+
337
+ const trainingHistory = [];
338
+
339
+ // Split data
340
+ const splitIndex = Math.floor(trainingData.length * (1 - validationSplit));
341
+ const trainData = trainingData.slice(0, splitIndex);
342
+ const valData = trainingData.slice(splitIndex);
343
+
344
+ // Learning rate schedule
345
+ const lrSchedule = (epoch) => {
346
+ if (epoch < 10) {
347
+ return learningRate;
348
+ }
349
+ if (epoch < 15) {
350
+ return learningRate * 0.1;
351
+ }
352
+ return learningRate * 0.01;
353
+ };
354
+
355
+ for (let epoch = 0; epoch < epochs; epoch++) {
356
+ let epochLoss = 0;
357
+ let correctPredictions = 0;
358
+ let totalSamples = 0;
359
+
360
+ const currentLR = lrSchedule(epoch);
361
+
362
+ // Shuffle training data
363
+ const shuffled = this.shuffle(trainData);
364
+
365
+ // Process batches
366
+ for (let i = 0; i < shuffled.length; i += batchSize) {
367
+ const batch = shuffled.slice(i, Math.min(i + batchSize, shuffled.length));
368
+
369
+ // Forward pass
370
+ const predictions = await this.forward(batch.inputs, true);
371
+
372
+ // Calculate loss with L2 regularization
373
+ const loss = this.crossEntropyLoss(predictions, batch.targets);
374
+ const l2Loss = this.calculateL2Loss() * weightDecay;
375
+ const totalLoss = loss + l2Loss;
376
+
377
+ epochLoss += totalLoss;
378
+
379
+ // Calculate accuracy
380
+ const predicted = this.argmax(predictions);
381
+ const actual = this.argmax(batch.targets);
382
+ for (let j = 0; j < predicted.length; j++) {
383
+ if (predicted[j] === actual[j]) {
384
+ correctPredictions++;
385
+ }
386
+ }
387
+ totalSamples += batch.length;
388
+
389
+ // Backward pass
390
+ await this.backward(totalLoss, currentLR);
391
+ }
392
+
393
+ // Validation
394
+ const valMetrics = await this.validateWithAccuracy(valData);
395
+
396
+ const trainAccuracy = correctPredictions / totalSamples;
397
+ const avgTrainLoss = epochLoss / Math.ceil(trainData.length / batchSize);
398
+
399
+ trainingHistory.push({
400
+ epoch: epoch + 1,
401
+ trainLoss: avgTrainLoss,
402
+ trainAccuracy,
403
+ valLoss: valMetrics.loss,
404
+ valAccuracy: valMetrics.accuracy,
405
+ learningRate: currentLR,
406
+ });
407
+
408
+ console.log(
409
+ `Epoch ${epoch + 1}/${epochs} - ` +
410
+ `Train Loss: ${avgTrainLoss.toFixed(4)}, Train Acc: ${(trainAccuracy * 100).toFixed(2)}% - ` +
411
+ `Val Loss: ${valMetrics.loss.toFixed(4)}, Val Acc: ${(valMetrics.accuracy * 100).toFixed(2)}%`,
412
+ );
413
+ }
414
+
415
+ return {
416
+ history: trainingHistory,
417
+ finalLoss: trainingHistory[trainingHistory.length - 1].trainLoss,
418
+ modelType: 'resnet',
419
+ accuracy: trainingHistory[trainingHistory.length - 1].valAccuracy,
420
+ };
421
+ }
422
+
423
/**
 * Mean squared weight magnitude over the residual-block layers, used as the
 * L2 regularization term in train() (scaled there by weightDecay).
 *
 * NOTE(review): this returns the MEAN of squared weights, not the sum, and
 * it only covers block layers — inputProjection, skip-connection, and
 * output-layer weights are excluded. Confirm both are intentional before
 * relying on the absolute scale of this penalty. Also returns NaN if the
 * model has zero block weights (count stays 0).
 *
 * @returns {number} average of w^2 across all block-layer weights.
 */
calculateL2Loss() {
  let l2Sum = 0;
  let count = 0;

  // Accumulate squared weights from every layer of every residual block.
  for (const block of this.blocks) {
    for (const layer of block) {
      for (let i = 0; i < layer.weight.length; i++) {
        l2Sum += layer.weight[i] * layer.weight[i];
        count++;
      }
    }
  }

  return l2Sum / count;
}
439
+
440
+ argmax(tensor) {
441
+ // Assumes tensor shape is [batch, classes]
442
+ const batchSize = tensor.shape[0];
443
+ const numClasses = tensor.shape[1];
444
+ const result = new Int32Array(batchSize);
445
+
446
+ for (let b = 0; b < batchSize; b++) {
447
+ let maxIdx = 0;
448
+ let maxVal = tensor[b * numClasses];
449
+
450
+ for (let c = 1; c < numClasses; c++) {
451
+ if (tensor[b * numClasses + c] > maxVal) {
452
+ maxVal = tensor[b * numClasses + c];
453
+ maxIdx = c;
454
+ }
455
+ }
456
+
457
+ result[b] = maxIdx;
458
+ }
459
+
460
+ return result;
461
+ }
462
+
463
+ async validateWithAccuracy(validationData) {
464
+ let totalLoss = 0;
465
+ let correctPredictions = 0;
466
+ let totalSamples = 0;
467
+
468
+ for (const batch of validationData) {
469
+ const predictions = await this.forward(batch.inputs, false);
470
+ const loss = this.crossEntropyLoss(predictions, batch.targets);
471
+ totalLoss += loss;
472
+
473
+ const predicted = this.argmax(predictions);
474
+ const actual = this.argmax(batch.targets);
475
+ for (let i = 0; i < predicted.length; i++) {
476
+ if (predicted[i] === actual[i]) {
477
+ correctPredictions++;
478
+ }
479
+ }
480
+ totalSamples += batch.inputs.shape[0];
481
+ }
482
+
483
+ return {
484
+ loss: totalLoss / validationData.length,
485
+ accuracy: correctPredictions / totalSamples,
486
+ };
487
+ }
488
+
489
+ getConfig() {
490
+ return {
491
+ type: 'resnet',
492
+ ...this.config,
493
+ parameters: this.countParameters(),
494
+ depth: this.config.numBlocks * this.config.blockDepth + 2, // +2 for input and output layers
495
+ };
496
+ }
497
+
498
+ countParameters() {
499
+ let count = 0;
500
+
501
+ // Input projection
502
+ count += this.inputProjection.weight.length + this.inputProjection.bias.length;
503
+
504
+ // Residual blocks
505
+ for (let blockIdx = 0; blockIdx < this.blocks.length; blockIdx++) {
506
+ const block = this.blocks[blockIdx];
507
+
508
+ // Block layers
509
+ for (const layer of block) {
510
+ count += layer.weight.length + layer.bias.length;
511
+ }
512
+
513
+ // Skip connection
514
+ if (this.skipConnections[blockIdx]) {
515
+ count += this.skipConnections[blockIdx].weight.length;
516
+ count += this.skipConnections[blockIdx].bias.length;
517
+ }
518
+
519
+ // Batch norm parameters
520
+ if (this.config.batchNorm) {
521
+ for (const bn of this.batchNormParams[blockIdx]) {
522
+ count += bn.gamma.length + bn.beta.length;
523
+ }
524
+ }
525
+ }
526
+
527
+ // Output layer
528
+ count += this.outputLayer.weight.length + this.outputLayer.bias.length;
529
+
530
+ return count;
531
+ }
532
+ }
533
+
534
+ export { ResNetModel };