@sparkleideas/ruv-swarm 1.0.18-patch.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87) hide show
  1. package/README.md +1565 -0
  2. package/bin/ruv-swarm-clean.js +1872 -0
  3. package/bin/ruv-swarm-memory.js +119 -0
  4. package/bin/ruv-swarm-secure-heartbeat.js +1549 -0
  5. package/bin/ruv-swarm-secure.js +1689 -0
  6. package/package.json +221 -0
  7. package/src/agent.ts +342 -0
  8. package/src/benchmark.js +267 -0
  9. package/src/claude-flow-enhanced.js +839 -0
  10. package/src/claude-integration/advanced-commands.js +561 -0
  11. package/src/claude-integration/core.js +112 -0
  12. package/src/claude-integration/docs.js +1548 -0
  13. package/src/claude-integration/env-template.js +39 -0
  14. package/src/claude-integration/index.js +209 -0
  15. package/src/claude-integration/remote.js +408 -0
  16. package/src/cli-diagnostics.js +364 -0
  17. package/src/cognitive-pattern-evolution.js +1317 -0
  18. package/src/daa-cognition.js +977 -0
  19. package/src/daa-service.d.ts +298 -0
  20. package/src/daa-service.js +1116 -0
  21. package/src/diagnostics.js +533 -0
  22. package/src/errors.js +528 -0
  23. package/src/github-coordinator/README.md +193 -0
  24. package/src/github-coordinator/claude-hooks.js +162 -0
  25. package/src/github-coordinator/gh-cli-coordinator.js +260 -0
  26. package/src/hooks/cli.js +82 -0
  27. package/src/hooks/index.js +1900 -0
  28. package/src/index-enhanced.d.ts +371 -0
  29. package/src/index-enhanced.js +734 -0
  30. package/src/index.d.ts +287 -0
  31. package/src/index.js +405 -0
  32. package/src/index.ts +457 -0
  33. package/src/logger.js +182 -0
  34. package/src/logging-config.js +179 -0
  35. package/src/mcp-daa-tools.js +735 -0
  36. package/src/mcp-tools-benchmarks.js +328 -0
  37. package/src/mcp-tools-enhanced.js +2863 -0
  38. package/src/memory-config.js +42 -0
  39. package/src/meta-learning-framework.js +1359 -0
  40. package/src/neural-agent.js +830 -0
  41. package/src/neural-coordination-protocol.js +1363 -0
  42. package/src/neural-models/README.md +118 -0
  43. package/src/neural-models/autoencoder.js +543 -0
  44. package/src/neural-models/base.js +269 -0
  45. package/src/neural-models/cnn.js +497 -0
  46. package/src/neural-models/gnn.js +447 -0
  47. package/src/neural-models/gru.js +536 -0
  48. package/src/neural-models/index.js +273 -0
  49. package/src/neural-models/lstm.js +551 -0
  50. package/src/neural-models/neural-presets-complete.js +1306 -0
  51. package/src/neural-models/presets/graph.js +392 -0
  52. package/src/neural-models/presets/index.js +279 -0
  53. package/src/neural-models/presets/nlp.js +328 -0
  54. package/src/neural-models/presets/timeseries.js +368 -0
  55. package/src/neural-models/presets/vision.js +387 -0
  56. package/src/neural-models/resnet.js +534 -0
  57. package/src/neural-models/transformer.js +515 -0
  58. package/src/neural-models/vae.js +489 -0
  59. package/src/neural-network-manager.js +1938 -0
  60. package/src/neural-network.ts +296 -0
  61. package/src/neural.js +574 -0
  62. package/src/performance-benchmarks.js +898 -0
  63. package/src/performance.js +458 -0
  64. package/src/persistence-pooled.js +695 -0
  65. package/src/persistence.js +480 -0
  66. package/src/schemas.js +864 -0
  67. package/src/security.js +218 -0
  68. package/src/singleton-container.js +183 -0
  69. package/src/sqlite-pool.js +587 -0
  70. package/src/sqlite-worker.js +141 -0
  71. package/src/types.ts +164 -0
  72. package/src/utils.ts +286 -0
  73. package/src/wasm-loader.js +601 -0
  74. package/src/wasm-loader2.js +404 -0
  75. package/src/wasm-memory-optimizer.js +783 -0
  76. package/src/wasm-types.d.ts +63 -0
  77. package/wasm/README.md +347 -0
  78. package/wasm/neuro-divergent.wasm +0 -0
  79. package/wasm/package.json +18 -0
  80. package/wasm/ruv-fann.wasm +0 -0
  81. package/wasm/ruv_swarm_simd.wasm +0 -0
  82. package/wasm/ruv_swarm_wasm.d.ts +391 -0
  83. package/wasm/ruv_swarm_wasm.js +2164 -0
  84. package/wasm/ruv_swarm_wasm_bg.wasm +0 -0
  85. package/wasm/ruv_swarm_wasm_bg.wasm.d.ts +123 -0
  86. package/wasm/wasm-bindings-loader.mjs +435 -0
  87. package/wasm/wasm-updates.md +684 -0
@@ -0,0 +1,387 @@
1
+ /**
2
+ * Computer Vision Neural Network Presets
3
+ * Production-ready configurations for image and video processing tasks
4
+ */
5
+
6
+ export const visionPresets = {
7
+ // Real-time Object Detection
8
+ object_detection_realtime: {
9
+ name: 'Real-time Object Detector',
10
+ description: 'Optimized for real-time object detection in video streams',
11
+ model: 'cnn',
12
+ config: {
13
+ inputShape: [416, 416, 3],
14
+ architecture: 'yolo_v5',
15
+ convLayers: [
16
+ { filters: 32, kernelSize: 3, stride: 1, activation: 'mish' },
17
+ { filters: 64, kernelSize: 3, stride: 2, activation: 'mish' },
18
+ { filters: 128, kernelSize: 3, stride: 1, activation: 'mish' },
19
+ { filters: 256, kernelSize: 3, stride: 2, activation: 'mish' },
20
+ ],
21
+ anchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]],
22
+ numClasses: 80,
23
+ dropoutRate: 0.2,
24
+ },
25
+ training: {
26
+ batchSize: 16,
27
+ learningRate: 1e-3,
28
+ epochs: 100,
29
+ optimizer: 'sgd',
30
+ momentum: 0.9,
31
+ augmentation: {
32
+ rotation: 15,
33
+ zoom: 0.2,
34
+ flip: true,
35
+ colorJitter: 0.2,
36
+ },
37
+ },
38
+ performance: {
39
+ expectedAccuracy: '85-88% mAP',
40
+ inferenceTime: '8ms (30+ FPS)',
41
+ memoryUsage: '150MB',
42
+ trainingTime: '24-48 hours on GPU',
43
+ },
44
+ useCase: 'Security cameras, autonomous vehicles, robotics',
45
+ },
46
+
47
+ // Facial Recognition
48
+ facial_recognition_secure: {
49
+ name: 'Secure Facial Recognition',
50
+ description: 'High-accuracy facial recognition with privacy features',
51
+ model: 'resnet',
52
+ config: {
53
+ inputShape: [160, 160, 3],
54
+ architecture: 'facenet',
55
+ numBlocks: 8,
56
+ blockDepth: 3,
57
+ hiddenDimensions: 512,
58
+ initialChannels: 64,
59
+ embeddingSize: 128,
60
+ useArcFaceLoss: true,
61
+ },
62
+ training: {
63
+ batchSize: 128,
64
+ learningRate: 5e-4,
65
+ epochs: 200,
66
+ optimizer: 'adam',
67
+ scheduler: 'cosine',
68
+ margin: 0.5,
69
+ scale: 30,
70
+ },
71
+ performance: {
72
+ expectedAccuracy: '99.2% on LFW',
73
+ inferenceTime: '5ms',
74
+ memoryUsage: '200MB',
75
+ trainingTime: '3-5 days on GPU',
76
+ },
77
+ useCase: 'Access control, identity verification, secure authentication',
78
+ },
79
+
80
+ // Medical Image Analysis
81
+ medical_imaging_analysis: {
82
+ name: 'Medical Image Analyzer',
83
+ description: 'Analyze medical images for diagnosis support',
84
+ model: 'cnn',
85
+ config: {
86
+ inputShape: [512, 512, 1], // Grayscale medical images
87
+ architecture: 'unet_3d',
88
+ convLayers: [
89
+ { filters: 64, kernelSize: 3, stride: 1, activation: 'relu', batchNorm: true },
90
+ { filters: 128, kernelSize: 3, stride: 1, activation: 'relu', batchNorm: true },
91
+ { filters: 256, kernelSize: 3, stride: 1, activation: 'relu', batchNorm: true },
92
+ { filters: 512, kernelSize: 3, stride: 1, activation: 'relu', batchNorm: true },
93
+ ],
94
+ skipConnections: true,
95
+ attentionGates: true,
96
+ dropoutRate: 0.3,
97
+ },
98
+ training: {
99
+ batchSize: 8,
100
+ learningRate: 1e-4,
101
+ epochs: 150,
102
+ optimizer: 'adamw',
103
+ lossFunction: 'dice_bce',
104
+ classWeights: 'auto',
105
+ augmentation: {
106
+ rotation: 20,
107
+ elasticDeformation: true,
108
+ intensityShift: 0.1,
109
+ },
110
+ },
111
+ performance: {
112
+ expectedAccuracy: '93-95% Dice Score',
113
+ inferenceTime: '200ms',
114
+ memoryUsage: '2GB',
115
+ trainingTime: '48-72 hours on GPU',
116
+ },
117
+ useCase: 'Tumor detection, organ segmentation, disease classification',
118
+ },
119
+
120
+ // Autonomous Driving
121
+ autonomous_driving: {
122
+ name: 'Autonomous Driving Vision',
123
+ description: 'Multi-task vision for autonomous vehicles',
124
+ model: 'cnn',
125
+ config: {
126
+ inputShape: [640, 480, 3],
127
+ architecture: 'multitask_network',
128
+ backboneNetwork: 'efficientnet_b4',
129
+ tasks: {
130
+ segmentation: { numClasses: 19 },
131
+ detection: { numClasses: 10 },
132
+ depthEstimation: { outputChannels: 1 },
133
+ laneDetection: { numLanes: 4 },
134
+ },
135
+ featurePyramid: true,
136
+ dropoutRate: 0.2,
137
+ },
138
+ training: {
139
+ batchSize: 4,
140
+ learningRate: 2e-4,
141
+ epochs: 80,
142
+ optimizer: 'adam',
143
+ multiTaskWeights: {
144
+ segmentation: 1.0,
145
+ detection: 1.0,
146
+ depth: 0.5,
147
+ lanes: 0.8,
148
+ },
149
+ mixedPrecision: true,
150
+ },
151
+ performance: {
152
+ expectedAccuracy: '88-91% mIoU',
153
+ inferenceTime: '25ms',
154
+ memoryUsage: '500MB',
155
+ trainingTime: '5-7 days on multi-GPU',
156
+ },
157
+ useCase: 'Self-driving cars, ADAS systems, robotics navigation',
158
+ },
159
+
160
+ // Quality Inspection
161
+ quality_inspection: {
162
+ name: 'Industrial Quality Inspector',
163
+ description: 'Detect defects in manufacturing',
164
+ model: 'cnn',
165
+ config: {
166
+ inputShape: [224, 224, 3],
167
+ architecture: 'siamese_network',
168
+ backbone: 'resnet50',
169
+ metricLearning: true,
170
+ embeddingDimension: 256,
171
+ anomalyThreshold: 0.85,
172
+ dropoutRate: 0.3,
173
+ },
174
+ training: {
175
+ batchSize: 32,
176
+ learningRate: 1e-3,
177
+ epochs: 100,
178
+ optimizer: 'adam',
179
+ contrastiveLoss: true,
180
+ hardNegativeMining: true,
181
+ augmentation: {
182
+ rotation: 360,
183
+ brightness: 0.3,
184
+ contrast: 0.3,
185
+ noise: 0.05,
186
+ },
187
+ },
188
+ performance: {
189
+ expectedAccuracy: '96-98% defect detection',
190
+ inferenceTime: '10ms',
191
+ memoryUsage: '300MB',
192
+ trainingTime: '12-24 hours on GPU',
193
+ },
194
+ useCase: 'Manufacturing QC, PCB inspection, surface defect detection',
195
+ },
196
+
197
+ // Satellite Image Analysis
198
+ satellite_image_analysis: {
199
+ name: 'Satellite Image Analyzer',
200
+ description: 'Analyze satellite imagery for various applications',
201
+ model: 'cnn',
202
+ config: {
203
+ inputShape: [512, 512, 8], // Multispectral channels
204
+ architecture: 'deeplab_v3_plus',
205
+ backbone: 'xception',
206
+ outputStride: 16,
207
+ numClasses: 15,
208
+ asppDilationRates: [6, 12, 18],
209
+ dropoutRate: 0.3,
210
+ },
211
+ training: {
212
+ batchSize: 8,
213
+ learningRate: 5e-4,
214
+ epochs: 120,
215
+ optimizer: 'sgd',
216
+ momentum: 0.9,
217
+ polynomialDecay: true,
218
+ augmentation: {
219
+ randomCrop: 448,
220
+ horizontalFlip: true,
221
+ verticalFlip: true,
222
+ gaussianNoise: 0.01,
223
+ },
224
+ },
225
+ performance: {
226
+ expectedAccuracy: '89-92% pixel accuracy',
227
+ inferenceTime: '150ms',
228
+ memoryUsage: '1.5GB',
229
+ trainingTime: '36-48 hours on GPU',
230
+ },
231
+ useCase: 'Land use classification, change detection, disaster response',
232
+ },
233
+
234
+ // Document Scanner
235
+ document_scanner: {
236
+ name: 'Document Scanner and OCR',
237
+ description: 'Scan and extract text from documents',
238
+ model: 'cnn',
239
+ config: {
240
+ inputShape: [768, 1024, 3],
241
+ architecture: 'crnn',
242
+ cnnBackbone: 'mobilenet_v3',
243
+ rnnHiddenSize: 256,
244
+ rnnLayers: 2,
245
+ vocabSize: 95, // Printable ASCII
246
+ ctcBeamWidth: 100,
247
+ dropoutRate: 0.3,
248
+ },
249
+ training: {
250
+ batchSize: 16,
251
+ learningRate: 1e-3,
252
+ epochs: 50,
253
+ optimizer: 'adam',
254
+ ctcLoss: true,
255
+ augmentation: {
256
+ perspective: true,
257
+ rotation: 5,
258
+ shear: 0.2,
259
+ blur: 0.5,
260
+ },
261
+ },
262
+ performance: {
263
+ expectedAccuracy: '98-99% character accuracy',
264
+ inferenceTime: '50ms',
265
+ memoryUsage: '400MB',
266
+ trainingTime: '24-36 hours on GPU',
267
+ },
268
+ useCase: 'Document digitization, receipt scanning, form processing',
269
+ },
270
+
271
+ // Video Action Recognition
272
+ video_action_recognition: {
273
+ name: 'Video Action Recognizer',
274
+ description: 'Recognize human actions in video sequences',
275
+ model: 'cnn',
276
+ config: {
277
+ inputShape: [16, 224, 224, 3], // 16 frames
278
+ architecture: 'i3d',
279
+ inflatedKernels: true,
280
+ temporalKernelSize: 3,
281
+ numClasses: 400,
282
+ includeOpticalFlow: false,
283
+ dropoutRate: 0.5,
284
+ },
285
+ training: {
286
+ batchSize: 8,
287
+ learningRate: 1e-3,
288
+ epochs: 80,
289
+ optimizer: 'sgd',
290
+ momentum: 0.9,
291
+ clipGradientNorm: 40,
292
+ augmentation: {
293
+ temporalJitter: 4,
294
+ spatialCrop: 'random',
295
+ colorJitter: 0.2,
296
+ },
297
+ },
298
+ performance: {
299
+ expectedAccuracy: '82-85% top-1',
300
+ inferenceTime: '100ms per clip',
301
+ memoryUsage: '800MB',
302
+ trainingTime: '3-5 days on GPU',
303
+ },
304
+ useCase: 'Sports analysis, surveillance, human-computer interaction',
305
+ },
306
+
307
+ // Image Enhancement
308
+ image_enhancement: {
309
+ name: 'AI Image Enhancer',
310
+ description: 'Enhance image quality and resolution',
311
+ model: 'autoencoder',
312
+ config: {
313
+ inputSize: 65536, // 256x256
314
+ encoderLayers: [32768, 16384, 8192, 4096],
315
+ bottleneckSize: 2048,
316
+ decoderLayers: [4096, 8192, 16384, 32768],
317
+ skipConnections: true,
318
+ residualLearning: true,
319
+ perceptualLoss: true,
320
+ activation: 'prelu',
321
+ },
322
+ training: {
323
+ batchSize: 16,
324
+ learningRate: 2e-4,
325
+ epochs: 200,
326
+ optimizer: 'adam',
327
+ lossWeights: {
328
+ reconstruction: 1.0,
329
+ perceptual: 0.1,
330
+ adversarial: 0.001,
331
+ },
332
+ scheduler: 'reduceLROnPlateau',
333
+ },
334
+ performance: {
335
+ expectedAccuracy: '32-35 PSNR',
336
+ inferenceTime: '80ms',
337
+ memoryUsage: '600MB',
338
+ trainingTime: '48-72 hours on GPU',
339
+ },
340
+ useCase: 'Photo restoration, super-resolution, denoising',
341
+ },
342
+
343
+ // Style Transfer
344
+ style_transfer: {
345
+ name: 'Neural Style Transfer',
346
+ description: 'Apply artistic styles to images',
347
+ model: 'cnn',
348
+ config: {
349
+ inputShape: [512, 512, 3],
350
+ architecture: 'style_transfer_net',
351
+ encoderBackbone: 'vgg19',
352
+ decoderDepth: 5,
353
+ instanceNormalization: true,
354
+ styleEmbeddingSize: 256,
355
+ numStyles: 10,
356
+ dropoutRate: 0.0,
357
+ },
358
+ training: {
359
+ batchSize: 8,
360
+ learningRate: 1e-3,
361
+ epochs: 40,
362
+ optimizer: 'adam',
363
+ contentWeight: 1.0,
364
+ styleWeight: 100000,
365
+ tvWeight: 1e-6,
366
+ useMultipleStyleLayers: true,
367
+ },
368
+ performance: {
369
+ expectedAccuracy: 'Subjective quality',
370
+ inferenceTime: '100ms',
371
+ memoryUsage: '500MB',
372
+ trainingTime: '12-24 hours on GPU',
373
+ },
374
+ useCase: 'Artistic applications, photo filters, content creation',
375
+ },
376
+ };
377
+
378
/**
 * Look up a vision preset by its key.
 *
 * @param {string} presetName - Key into `visionPresets`
 *   (e.g. 'object_detection_realtime').
 * @returns {object} The matching preset configuration object.
 * @throws {Error} If no preset with that name exists; the message lists
 *   the valid preset keys.
 */
export const getVisionPreset = (presetName) => {
  // Use Object.hasOwn instead of a truthy property lookup: inherited keys
  // such as 'toString' or 'constructor' must throw rather than leak
  // Object.prototype members, and an own-key check also cannot be fooled
  // by falsy values.
  if (!Object.hasOwn(visionPresets, presetName)) {
    throw new Error(`Vision preset '${presetName}' not found. Available presets: ${Object.keys(visionPresets).join(', ')}`);
  }
  return visionPresets[presetName];
};
385
+
386
// Convenience export: the list of preset keys accepted by getVisionPreset.
// Snapshot taken at module load time — later mutation of visionPresets is
// not reflected here.
export const availableVisionPresets = Object.keys(visionPresets);