@reicek/neataptic-ts 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. package/.github/ISSUE_TEMPLATE/bug_report.md +33 -0
  2. package/.github/ISSUE_TEMPLATE/feature_request.md +27 -0
  3. package/.github/PULL_REQUEST_TEMPLATE.md +28 -0
  4. package/.github/workflows/ci.yml +41 -0
  5. package/.github/workflows/deploy-pages.yml +29 -0
  6. package/.github/workflows/manual_release_pipeline.yml +62 -0
  7. package/.github/workflows/publish.yml +85 -0
  8. package/.github/workflows/release_dispatch.yml +38 -0
  9. package/.travis.yml +5 -0
  10. package/CONTRIBUTING.md +92 -0
  11. package/LICENSE +24 -0
  12. package/ONNX_EXPORT.md +87 -0
  13. package/README.md +1173 -0
  14. package/RELEASE.md +54 -0
  15. package/dist-docs/package.json +1 -0
  16. package/dist-docs/scripts/generate-docs.d.ts +2 -0
  17. package/dist-docs/scripts/generate-docs.d.ts.map +1 -0
  18. package/dist-docs/scripts/generate-docs.js +536 -0
  19. package/dist-docs/scripts/generate-docs.js.map +1 -0
  20. package/dist-docs/scripts/render-docs-html.d.ts +2 -0
  21. package/dist-docs/scripts/render-docs-html.d.ts.map +1 -0
  22. package/dist-docs/scripts/render-docs-html.js +148 -0
  23. package/dist-docs/scripts/render-docs-html.js.map +1 -0
  24. package/docs/FOLDERS.md +14 -0
  25. package/docs/README.md +1173 -0
  26. package/docs/architecture/README.md +1391 -0
  27. package/docs/architecture/index.html +938 -0
  28. package/docs/architecture/network/README.md +1210 -0
  29. package/docs/architecture/network/index.html +908 -0
  30. package/docs/assets/ascii-maze.bundle.js +16542 -0
  31. package/docs/assets/ascii-maze.bundle.js.map +7 -0
  32. package/docs/index.html +1419 -0
  33. package/docs/methods/README.md +670 -0
  34. package/docs/methods/index.html +477 -0
  35. package/docs/multithreading/README.md +274 -0
  36. package/docs/multithreading/index.html +215 -0
  37. package/docs/multithreading/workers/README.md +23 -0
  38. package/docs/multithreading/workers/browser/README.md +39 -0
  39. package/docs/multithreading/workers/browser/index.html +70 -0
  40. package/docs/multithreading/workers/index.html +57 -0
  41. package/docs/multithreading/workers/node/README.md +33 -0
  42. package/docs/multithreading/workers/node/index.html +66 -0
  43. package/docs/neat/README.md +1284 -0
  44. package/docs/neat/index.html +906 -0
  45. package/docs/src/README.md +2659 -0
  46. package/docs/src/index.html +1579 -0
  47. package/jest.config.ts +32 -0
  48. package/package.json +99 -0
  49. package/plans/HyperMorphoNEAT.md +293 -0
  50. package/plans/ONNX_EXPORT_PLAN.md +46 -0
  51. package/scripts/generate-docs.ts +486 -0
  52. package/scripts/render-docs-html.ts +138 -0
  53. package/scripts/types.d.ts +2 -0
  54. package/src/README.md +2659 -0
  55. package/src/architecture/README.md +1391 -0
  56. package/src/architecture/activationArrayPool.ts +135 -0
  57. package/src/architecture/architect.ts +635 -0
  58. package/src/architecture/connection.ts +148 -0
  59. package/src/architecture/group.ts +406 -0
  60. package/src/architecture/layer.ts +804 -0
  61. package/src/architecture/network/README.md +1210 -0
  62. package/src/architecture/network/network.activate.ts +223 -0
  63. package/src/architecture/network/network.connect.ts +157 -0
  64. package/src/architecture/network/network.deterministic.ts +167 -0
  65. package/src/architecture/network/network.evolve.ts +426 -0
  66. package/src/architecture/network/network.gating.ts +186 -0
  67. package/src/architecture/network/network.genetic.ts +247 -0
  68. package/src/architecture/network/network.mutate.ts +624 -0
  69. package/src/architecture/network/network.onnx.ts +463 -0
  70. package/src/architecture/network/network.prune.ts +216 -0
  71. package/src/architecture/network/network.remove.ts +96 -0
  72. package/src/architecture/network/network.serialize.ts +309 -0
  73. package/src/architecture/network/network.slab.ts +262 -0
  74. package/src/architecture/network/network.standalone.ts +246 -0
  75. package/src/architecture/network/network.stats.ts +59 -0
  76. package/src/architecture/network/network.topology.ts +86 -0
  77. package/src/architecture/network/network.training.ts +1278 -0
  78. package/src/architecture/network.ts +1302 -0
  79. package/src/architecture/node.ts +1288 -0
  80. package/src/architecture/onnx.ts +3 -0
  81. package/src/config.ts +83 -0
  82. package/src/methods/README.md +670 -0
  83. package/src/methods/activation.ts +372 -0
  84. package/src/methods/connection.ts +31 -0
  85. package/src/methods/cost.ts +347 -0
  86. package/src/methods/crossover.ts +63 -0
  87. package/src/methods/gating.ts +43 -0
  88. package/src/methods/methods.ts +8 -0
  89. package/src/methods/mutation.ts +300 -0
  90. package/src/methods/rate.ts +257 -0
  91. package/src/methods/selection.ts +65 -0
  92. package/src/multithreading/README.md +274 -0
  93. package/src/multithreading/multi.ts +339 -0
  94. package/src/multithreading/workers/README.md +23 -0
  95. package/src/multithreading/workers/browser/README.md +39 -0
  96. package/src/multithreading/workers/browser/testworker.ts +99 -0
  97. package/src/multithreading/workers/node/README.md +33 -0
  98. package/src/multithreading/workers/node/testworker.ts +72 -0
  99. package/src/multithreading/workers/node/worker.ts +70 -0
  100. package/src/multithreading/workers/workers.ts +22 -0
  101. package/src/neat/README.md +1284 -0
  102. package/src/neat/neat.adaptive.ts +544 -0
  103. package/src/neat/neat.compat.ts +164 -0
  104. package/src/neat/neat.constants.ts +20 -0
  105. package/src/neat/neat.diversity.ts +217 -0
  106. package/src/neat/neat.evaluate.ts +328 -0
  107. package/src/neat/neat.evolve.ts +1026 -0
  108. package/src/neat/neat.export.ts +249 -0
  109. package/src/neat/neat.helpers.ts +235 -0
  110. package/src/neat/neat.lineage.ts +220 -0
  111. package/src/neat/neat.multiobjective.ts +260 -0
  112. package/src/neat/neat.mutation.ts +718 -0
  113. package/src/neat/neat.objectives.ts +157 -0
  114. package/src/neat/neat.pruning.ts +190 -0
  115. package/src/neat/neat.selection.ts +269 -0
  116. package/src/neat/neat.speciation.ts +460 -0
  117. package/src/neat/neat.species.ts +151 -0
  118. package/src/neat/neat.telemetry.exports.ts +469 -0
  119. package/src/neat/neat.telemetry.ts +933 -0
  120. package/src/neat/neat.types.ts +275 -0
  121. package/src/neat.ts +1042 -0
  122. package/src/neataptic.ts +10 -0
  123. package/test/architecture/activationArrayPool.capacity.test.ts +19 -0
  124. package/test/architecture/activationArrayPool.test.ts +46 -0
  125. package/test/architecture/connection.test.ts +290 -0
  126. package/test/architecture/group.test.ts +950 -0
  127. package/test/architecture/layer.test.ts +1535 -0
  128. package/test/architecture/network.pruning.test.ts +65 -0
  129. package/test/architecture/node.test.ts +1602 -0
  130. package/test/examples/asciiMaze/asciiMaze.e2e.test.ts +499 -0
  131. package/test/examples/asciiMaze/asciiMaze.ts +41 -0
  132. package/test/examples/asciiMaze/browser-entry.ts +164 -0
  133. package/test/examples/asciiMaze/browserLogger.ts +221 -0
  134. package/test/examples/asciiMaze/browserTerminalUtility.ts +48 -0
  135. package/test/examples/asciiMaze/colors.ts +119 -0
  136. package/test/examples/asciiMaze/dashboardManager.ts +968 -0
  137. package/test/examples/asciiMaze/evolutionEngine.ts +1248 -0
  138. package/test/examples/asciiMaze/fitness.ts +136 -0
  139. package/test/examples/asciiMaze/index.html +128 -0
  140. package/test/examples/asciiMaze/index.ts +26 -0
  141. package/test/examples/asciiMaze/interfaces.ts +235 -0
  142. package/test/examples/asciiMaze/mazeMovement.ts +996 -0
  143. package/test/examples/asciiMaze/mazeUtils.ts +278 -0
  144. package/test/examples/asciiMaze/mazeVision.ts +402 -0
  145. package/test/examples/asciiMaze/mazeVisualization.ts +585 -0
  146. package/test/examples/asciiMaze/mazes.ts +245 -0
  147. package/test/examples/asciiMaze/networkRefinement.ts +76 -0
  148. package/test/examples/asciiMaze/networkVisualization.ts +901 -0
  149. package/test/examples/asciiMaze/terminalUtility.ts +73 -0
  150. package/test/methods/activation.test.ts +1142 -0
  151. package/test/methods/connection.test.ts +146 -0
  152. package/test/methods/cost.test.ts +1123 -0
  153. package/test/methods/crossover.test.ts +202 -0
  154. package/test/methods/gating.test.ts +144 -0
  155. package/test/methods/mutation.test.ts +451 -0
  156. package/test/methods/optimizers.advanced.test.ts +80 -0
  157. package/test/methods/optimizers.behavior.test.ts +105 -0
  158. package/test/methods/optimizers.formula.test.ts +89 -0
  159. package/test/methods/rate.cosineWarmRestarts.test.ts +44 -0
  160. package/test/methods/rate.linearWarmupDecay.test.ts +41 -0
  161. package/test/methods/rate.reduceOnPlateau.test.ts +45 -0
  162. package/test/methods/rate.test.ts +684 -0
  163. package/test/methods/selection.test.ts +245 -0
  164. package/test/multithreading/activations.functions.test.ts +54 -0
  165. package/test/multithreading/multi.test.ts +290 -0
  166. package/test/multithreading/worker.node.process.test.ts +39 -0
  167. package/test/multithreading/workers.coverage.test.ts +36 -0
  168. package/test/multithreading/workers.dynamic.import.test.ts +8 -0
  169. package/test/neat/neat.adaptive.complexityBudget.test.ts +34 -0
  170. package/test/neat/neat.adaptive.criterion.complexity.test.ts +50 -0
  171. package/test/neat/neat.adaptive.mutation.strategy.test.ts +37 -0
  172. package/test/neat/neat.adaptive.operator.decay.test.ts +31 -0
  173. package/test/neat/neat.adaptive.phasedComplexity.test.ts +25 -0
  174. package/test/neat/neat.adaptive.pruning.test.ts +25 -0
  175. package/test/neat/neat.adaptive.targetSpecies.test.ts +43 -0
  176. package/test/neat/neat.additional.coverage.test.ts +126 -0
  177. package/test/neat/neat.advanced.enhancements.test.ts +85 -0
  178. package/test/neat/neat.advanced.test.ts +589 -0
  179. package/test/neat/neat.diversity.autocompat.test.ts +47 -0
  180. package/test/neat/neat.diversity.metrics.test.ts +21 -0
  181. package/test/neat/neat.diversity.stats.test.ts +44 -0
  182. package/test/neat/neat.enhancements.test.ts +79 -0
  183. package/test/neat/neat.entropy.ancestorAdaptive.test.ts +133 -0
  184. package/test/neat/neat.entropy.compat.csv.test.ts +108 -0
  185. package/test/neat/neat.evolution.pruning.test.ts +39 -0
  186. package/test/neat/neat.fastmode.autotune.test.ts +42 -0
  187. package/test/neat/neat.innovation.test.ts +134 -0
  188. package/test/neat/neat.lineage.antibreeding.test.ts +35 -0
  189. package/test/neat/neat.lineage.entropy.test.ts +56 -0
  190. package/test/neat/neat.lineage.inbreeding.test.ts +49 -0
  191. package/test/neat/neat.lineage.pressure.test.ts +29 -0
  192. package/test/neat/neat.multiobjective.adaptive.test.ts +57 -0
  193. package/test/neat/neat.multiobjective.dynamic.schedule.test.ts +46 -0
  194. package/test/neat/neat.multiobjective.dynamic.test.ts +31 -0
  195. package/test/neat/neat.multiobjective.fastsort.delegation.test.ts +51 -0
  196. package/test/neat/neat.multiobjective.prune.test.ts +39 -0
  197. package/test/neat/neat.multiobjective.test.ts +21 -0
  198. package/test/neat/neat.mutation.undefined.pool.test.ts +24 -0
  199. package/test/neat/neat.objective.events.test.ts +26 -0
  200. package/test/neat/neat.objective.importance.test.ts +21 -0
  201. package/test/neat/neat.objective.lifetimes.test.ts +33 -0
  202. package/test/neat/neat.offspring.allocation.test.ts +22 -0
  203. package/test/neat/neat.operator.bandit.test.ts +17 -0
  204. package/test/neat/neat.operator.phases.test.ts +38 -0
  205. package/test/neat/neat.pruneInactive.behavior.test.ts +54 -0
  206. package/test/neat/neat.reenable.adaptation.test.ts +18 -0
  207. package/test/neat/neat.rng.state.test.ts +22 -0
  208. package/test/neat/neat.spawn.add.test.ts +123 -0
  209. package/test/neat/neat.speciation.test.ts +96 -0
  210. package/test/neat/neat.species.allocation.telemetry.test.ts +26 -0
  211. package/test/neat/neat.species.history.csv.test.ts +24 -0
  212. package/test/neat/neat.telemetry.advanced.test.ts +226 -0
  213. package/test/neat/neat.telemetry.csv.lineage.test.ts +19 -0
  214. package/test/neat/neat.telemetry.parity.test.ts +42 -0
  215. package/test/neat/neat.telemetry.stream.test.ts +19 -0
  216. package/test/neat/neat.telemetry.test.ts +16 -0
  217. package/test/neat/neat.test.ts +422 -0
  218. package/test/neat/neat.utilities.test.ts +44 -0
  219. package/test/network/__suppress_console.ts +9 -0
  220. package/test/network/acyclic.topoorder.test.ts +17 -0
  221. package/test/network/checkpoint.metricshook.test.ts +36 -0
  222. package/test/network/error.handling.test.ts +581 -0
  223. package/test/network/evolution.test.ts +285 -0
  224. package/test/network/genetic.test.ts +208 -0
  225. package/test/network/learning.capability.test.ts +244 -0
  226. package/test/network/mutation.effects.test.ts +492 -0
  227. package/test/network/network.activate.test.ts +115 -0
  228. package/test/network/network.activateBatch.test.ts +30 -0
  229. package/test/network/network.deterministic.test.ts +64 -0
  230. package/test/network/network.evolve.branches.test.ts +75 -0
  231. package/test/network/network.evolve.multithread.branches.test.ts +83 -0
  232. package/test/network/network.evolve.test.ts +100 -0
  233. package/test/network/network.gating.removal.test.ts +93 -0
  234. package/test/network/network.mutate.additional.test.ts +145 -0
  235. package/test/network/network.mutate.edgecases.test.ts +101 -0
  236. package/test/network/network.mutate.test.ts +101 -0
  237. package/test/network/network.prune.earlyexit.test.ts +38 -0
  238. package/test/network/network.remove.errors.test.ts +45 -0
  239. package/test/network/network.slab.fallbacks.test.ts +22 -0
  240. package/test/network/network.stats.test.ts +45 -0
  241. package/test/network/network.training.advanced.test.ts +149 -0
  242. package/test/network/network.training.basic.test.ts +228 -0
  243. package/test/network/network.training.helpers.test.ts +183 -0
  244. package/test/network/onnx.export.test.ts +310 -0
  245. package/test/network/onnx.import.test.ts +129 -0
  246. package/test/network/pruning.topology.test.ts +282 -0
  247. package/test/network/regularization.determinism.test.ts +83 -0
  248. package/test/network/regularization.dropconnect.test.ts +17 -0
  249. package/test/network/regularization.dropconnect.validation.test.ts +18 -0
  250. package/test/network/regularization.stochasticdepth.test.ts +27 -0
  251. package/test/network/regularization.test.ts +843 -0
  252. package/test/network/regularization.weightnoise.test.ts +30 -0
  253. package/test/network/setupTests.ts +2 -0
  254. package/test/network/standalone.test.ts +332 -0
  255. package/test/network/structure.serialization.test.ts +660 -0
  256. package/test/training/training.determinism.mixed-precision.test.ts +134 -0
  257. package/test/training/training.earlystopping.test.ts +91 -0
  258. package/test/training/training.edge-cases.test.ts +91 -0
  259. package/test/training/training.extensions.test.ts +47 -0
  260. package/test/training/training.gradient.features.test.ts +110 -0
  261. package/test/training/training.gradient.refinements.test.ts +170 -0
  262. package/test/training/training.gradient.separate-bias.test.ts +41 -0
  263. package/test/training/training.optimizer.test.ts +48 -0
  264. package/test/training/training.plateau.smoothing.test.ts +58 -0
  265. package/test/training/training.smoothing.types.test.ts +174 -0
  266. package/test/training/training.train.options.coverage.test.ts +52 -0
  267. package/test/utils/console-helper.ts +76 -0
  268. package/test/utils/jest-setup.ts +60 -0
  269. package/test/utils/test-helpers.ts +175 -0
  270. package/tsconfig.docs.json +12 -0
  271. package/tsconfig.json +21 -0
  272. package/webpack.config.js +49 -0
@@ -0,0 +1,1302 @@
1
+ import Node from './node';
2
+ import Connection from './connection';
3
+ import Multi from '../multithreading/multi';
4
+ import * as methods from '../methods/methods';
5
+ import mutation from '../methods/mutation'; // Import mutation methods
6
+ import { config } from '../config'; // Import configuration settings
7
+ import { activationArrayPool, ActivationArray } from './activationArrayPool';
8
+ // ONNX export/import now lives in ./network/network.onnx (re-exported via ./onnx for backwards compat)
9
+ import { exportToONNX } from './onnx';
10
+ import { generateStandalone } from './network/network.standalone';
11
+ import {
12
+ computeTopoOrder as _computeTopoOrder,
13
+ hasPath as _hasPath,
14
+ } from './network/network.topology';
15
+ import {
16
+ rebuildConnectionSlab as _rebuildConnectionSlab,
17
+ fastSlabActivate as _fastSlabActivate,
18
+ canUseFastSlab as _canUseFastSlab,
19
+ getConnectionSlab as _getConnectionSlab,
20
+ } from './network/network.slab';
21
+ import {
22
+ maybePrune as _maybePrune,
23
+ pruneToSparsity as _pruneToSparsity,
24
+ getCurrentSparsity as _getCurrentSparsity,
25
+ } from './network/network.prune';
26
+ import {
27
+ gate as _gate,
28
+ ungate as _ungate,
29
+ removeNode as _removeNode,
30
+ } from './network/network.gating';
31
+ import {
32
+ setSeed as _setSeed,
33
+ snapshotRNG as _snapshotRNG,
34
+ restoreRNG as _restoreRNG,
35
+ getRNGState as _getRNGState,
36
+ setRNGState as _setRNGState,
37
+ } from './network/network.deterministic';
38
+ import { getRegularizationStats as _getRegularizationStats } from './network/network.stats';
39
+ import { removeNode as _removeNodeStandalone } from './network/network.remove';
40
+ import {
41
+ connect as _connect,
42
+ disconnect as _disconnect,
43
+ } from './network/network.connect';
44
+ import {
45
+ serialize as _serialize,
46
+ deserialize as _deserialize,
47
+ toJSONImpl as _toJSONImpl,
48
+ fromJSONImpl as _fromJSONImpl,
49
+ } from './network/network.serialize';
50
+ import { crossOver as _crossOver } from './network/network.genetic';
51
+
52
/**
 * NEAT-style neural network: a flat graph of nodes and connections supporting
 * gating, self-connections, pruning, regularization (dropout / DropConnect /
 * weight noise / stochastic depth), mixed-precision training state, and a
 * packed "slab" fast path for inference. Most heavy logic is delegated to the
 * ./network/* modules; this class holds the shared state.
 */
export default class Network {
  /** Number of input nodes. */
  input: number;
  /** Number of output nodes. */
  output: number;
  /** Fitness score assigned during evolution (set externally). */
  score?: number;
  /** All nodes, ordered input -> hidden -> output. */
  nodes: Node[];
  /** All forward connections (excludes self-connections). */
  connections: Connection[];
  /** Connections currently gated by a node. */
  gates: Connection[];
  /** Self-connections (node -> itself). */
  selfconns: Connection[];
  /** Node-level dropout probability (0 disables). */
  dropout: number = 0;
  /** DropConnect probability in [0,1); 0 disables. */
  private _dropConnectProb: number = 0;
  /** Last (possibly clipped) gradient norm — updated by training code elsewhere. */
  private _lastGradNorm?: number;
  /** Optimizer step counter. */
  private _optimizerStep: number = 0;
  /** Global weight-noise std dev (0 disables; mutually exclusive with per-layer). */
  private _weightNoiseStd: number = 0;
  /** Per-hidden-layer weight-noise std devs (empty when global noise is used). */
  private _weightNoisePerHidden: number[] = [];
  /** Optional schedule mapping training step -> weight-noise std dev. */
  private _weightNoiseSchedule?: (step: number) => number;
  /** Per-hidden-layer survival probabilities for stochastic depth (empty = off). */
  private _stochasticDepth: number[] = [];
  /** Stash of original weights while noise is applied — presumably restored after a pass; confirm in training code. */
  private _wnOrig?: number[];
  /** Training step counter (see `trainingStep` getter). */
  private _trainingStep: number = 0;
  /** Random source; replaced by setSeed/setRandom for determinism. */
  private _rand: () => number = Math.random;
  /** Serializable RNG state when a seeded generator is installed. */
  private _rngState?: number;
  /** Last stats object recorded by training/telemetry code (shape defined elsewhere). */
  private _lastStats: any = null;
  /** Optional schedule rewriting stochastic-depth survival probs per step. */
  private _stochasticDepthSchedule?: (
    step: number,
    current: number[]
  ) => number[];
  /** Mixed-precision toggle and current loss scale. */
  private _mixedPrecision: { enabled: boolean; lossScale: number } = {
    enabled: false,
    lossScale: 1,
  };
  /** Dynamic loss-scaling bookkeeping for mixed-precision training. */
  private _mixedPrecisionState: {
    goodSteps: number;
    badSteps: number;
    minLossScale: number;
    maxLossScale: number;
    overflowCount?: number;
    scaleUpEvents?: number;
    scaleDownEvents?: number;
  } = {
    goodSteps: 0,
    badSteps: 0,
    minLossScale: 1,
    maxLossScale: 65536,
    overflowCount: 0,
    scaleUpEvents: 0,
    scaleDownEvents: 0,
  };
  /** Number of micro-batches accumulated before an optimizer step (0 = off). */
  private _gradAccumMicroBatches: number = 0;
  /** Active gradient-clipping configuration, if any. */
  private _currentGradClip?: {
    mode: 'norm' | 'percentile' | 'layerwiseNorm' | 'layerwisePercentile';
    maxNorm?: number;
    percentile?: number;
  };
  /** Raw (pre-clip) gradient norm from the last step. */
  private _lastRawGradNorm: number = 0;
  /** How accumulated gradients are reduced before applying. */
  private _accumulationReduction: 'average' | 'sum' = 'average';
  /** Whether bias gradients are clipped as a separate group. */
  private _gradClipSeparateBias: boolean = false;
  /** Number of clip groups used in the last clipping pass. */
  private _lastGradClipGroupCount: number = 0;
  /** Step index of the last mixed-precision overflow (-1 = none yet). */
  private _lastOverflowStep: number = -1;
  /** Test hook: when true, the next step is treated as an overflow. */
  private _forceNextOverflow: boolean = false;
  /** Iteration-scheduled pruning configuration (see configurePruning). */
  private _pruningConfig?: {
    start: number;
    end: number;
    targetSparsity: number;
    regrowFraction: number;
    frequency: number;
    method: 'magnitude' | 'snip';
    lastPruneIter?: number;
  };
  /** Connection count captured when pruning was configured (sparsity baseline). */
  private _initialConnectionCount?: number;
  /** When true, connections that would create a cycle are rejected. */
  private _enforceAcyclic: boolean = false;
  /** Cached topological order (null until computed). */
  private _topoOrder: Node[] | null = null;
  /** When true, _topoOrder must be recomputed before use. */
  private _topoDirty: boolean = true;
  /** Global epoch counter — incremented by training code elsewhere. */
  private _globalEpoch: number = 0;
  /** Optional layer structure (set by layered architectures; undefined for flat graphs). */
  layers?: any[];
  private _evoInitialConnCount?: number; // baseline for evolution-time pruning
  private _activationPrecision: 'f64' | 'f32' = 'f64'; // typed array precision for compiled path
  private _reuseActivationArrays: boolean = false; // reuse pooled output arrays
  private _returnTypedActivations: boolean = false; // if true and reuse enabled, return typed array directly
  private _activationPool?: Float32Array | Float64Array; // pooled output array
  // Packed connection slab fields (for memory + cache efficiency when iterating connections)
  private _connWeights?: Float32Array | Float64Array;
  private _connFrom?: Uint32Array;
  private _connTo?: Uint32Array;
  /** When true, the packed slab must be rebuilt before use. */
  private _slabDirty: boolean = true;
  /** Store slab weights as Float32 (vs Float64). */
  private _useFloat32Weights: boolean = true;
  // Cached node.index maintenance (avoids repeated this.nodes.indexOf in hot paths like slab rebuild)
  private _nodeIndexDirty: boolean = true; // when true, node.index values must be reassigned sequentially
  // Fast slab forward path structures
  private _outStart?: Uint32Array;
  private _outOrder?: Uint32Array;
  /** When true, the adjacency structures above must be rebuilt. */
  private _adjDirty: boolean = true;
  // Cached typed arrays for fast slab forward pass
  private _fastA?: Float32Array | Float64Array;
  private _fastS?: Float32Array | Float64Array;
  // Internal hint: track a preferred linear chain edge to split on subsequent ADD_NODE mutations
  // to encourage deep path formation even in stochastic modes. Updated each time we split it.
  private _preferredChainEdge?: Connection;
149
+ // Slab helpers delegated to network.slab.ts
150
+ private _canUseFastSlab(training: boolean) {
151
+ return _canUseFastSlab.call(this, training);
152
+ }
153
+ private _fastSlabActivate(input: number[]) {
154
+ return _fastSlabActivate.call(this, input);
155
+ }
156
+ rebuildConnectionSlab(force = false) {
157
+ return _rebuildConnectionSlab.call(this, force);
158
+ }
159
+ getConnectionSlab() {
160
+ return _getConnectionSlab.call(this);
161
+ }
162
  /**
   * Builds a minimal network: `input` input nodes, `output` output nodes,
   * fully connected input -> output, plus optional hidden nodes.
   *
   * @param input  Number of input nodes (required).
   * @param output Number of output nodes (required).
   * @param options.minHidden            Ensure at least this many hidden nodes by splitting random connections.
   * @param options.seed                 Seed the internal RNG for deterministic construction.
   * @param options.enforceAcyclic       Reject connections that would create cycles.
   * @param options.activationPrecision  Typed-array precision for the compiled path ('f32' | 'f64').
   * @param options.reuseActivationArrays   Reuse pooled output arrays between activations.
   * @param options.returnTypedActivations  With reuse enabled, return the typed array directly.
   * @throws Error when input or output size is missing.
   */
  constructor(
    input: number,
    output: number,
    options?: {
      minHidden?: number;
      seed?: number;
      enforceAcyclic?: boolean;
      activationPrecision?: 'f32' | 'f64';
      reuseActivationArrays?: boolean;
      returnTypedActivations?: boolean;
    }
  ) {
    // Validate that input and output sizes are provided.
    if (typeof input === 'undefined' || typeof output === 'undefined') {
      throw new Error('No input or output size given');
    }

    // Initialize network properties
    this.input = input;
    this.output = output;
    this.nodes = [];
    this.connections = [];
    this.gates = [];
    this.selfconns = [];
    this.dropout = 0;
    this._enforceAcyclic = (options as any)?.enforceAcyclic || false;
    // Explicit option wins; otherwise fall back to the global float32 mode.
    if (options?.activationPrecision) {
      this._activationPrecision = options.activationPrecision;
    } else if (config.float32Mode) {
      this._activationPrecision = 'f32';
    }
    if (options?.reuseActivationArrays) this._reuseActivationArrays = true;
    if (options?.returnTypedActivations) this._returnTypedActivations = true;
    // Configure and prewarm the activation pool based on global config
    // (best-effort: pool tuning failures must not prevent construction).
    try {
      if (typeof config.poolMaxPerBucket === 'number')
        activationArrayPool.setMaxPerBucket(config.poolMaxPerBucket);
      const prewarm =
        typeof config.poolPrewarmCount === 'number'
          ? config.poolPrewarmCount
          : 2;
      activationArrayPool.prewarm(this.output, prewarm);
    } catch {}

    // Seed BEFORE creating nodes/connections so construction is deterministic.
    if (options?.seed !== undefined) {
      this.setSeed(options.seed);
    }

    // Create input nodes first, then output nodes.
    for (let i = 0; i < this.input + this.output; i++) {
      const type = i < this.input ? 'input' : 'output';
      this.nodes.push(new Node(type, undefined, this._rand));
    }
    // Fully connect inputs to outputs. NOTE(review): the init scale includes a
    // factor of `this.input` on top of sqrt(2/input); looks intentional
    // (matches upstream neataptic) but worth confirming.
    for (let i = 0; i < this.input; i++) {
      for (let j = this.input; j < this.input + this.output; j++) {
        const weight = this._rand() * this.input * Math.sqrt(2 / this.input);
        this.connect(this.nodes[i], this.nodes[j], weight);
      }
    }

    // Optionally grow hidden nodes by splitting random existing connections.
    const minHidden = options?.minHidden || 0;
    if (minHidden > 0) {
      while (this.nodes.length < this.input + this.output + minHidden) {
        this.addNodeBetween();
      }
    }
  }
228
+
229
+ // --- Added: structural helper referenced by constructor (split a random connection) ---
230
+ private addNodeBetween(): void {
231
+ if (this.connections.length === 0) return;
232
+ const idx = Math.floor(this._rand() * this.connections.length);
233
+ const conn = this.connections[idx];
234
+ if (!conn) return;
235
+ // Remove original connection
236
+ this.disconnect(conn.from, conn.to);
237
+ // Create new hidden node
238
+ const newNode = new Node('hidden', undefined, this._rand);
239
+ this.nodes.push(newNode);
240
+ // Connect from->newNode and newNode->to
241
+ this.connect(conn.from, newNode, conn.weight); // keep original weight on first leg
242
+ this.connect(newNode, conn.to, 1); // second leg weight initialised randomly or 1
243
+ // Invalidate topo cache
244
+ this._topoDirty = true;
245
+ this._nodeIndexDirty = true; // structure changed
246
+ }
247
+
248
+ // --- DropConnect API (re-added for tests) ---
249
+ enableDropConnect(p: number) {
250
+ if (p < 0 || p >= 1)
251
+ throw new Error('DropConnect probability must be in [0,1)');
252
+ this._dropConnectProb = p;
253
+ }
254
+ disableDropConnect() {
255
+ this._dropConnectProb = 0;
256
+ }
257
+
258
+ // --- Acyclic enforcement toggle (used by tests) ---
259
+ setEnforceAcyclic(flag: boolean) {
260
+ this._enforceAcyclic = !!flag;
261
+ }
262
+ private _computeTopoOrder() {
263
+ return _computeTopoOrder.call(this);
264
+ }
265
+ private _hasPath(from: Node, to: Node) {
266
+ return _hasPath.call(this, from, to);
267
+ }
268
+
269
+ // --- Pruning configuration & helpers ---
270
+ configurePruning(cfg: {
271
+ start: number;
272
+ end: number;
273
+ targetSparsity: number;
274
+ regrowFraction?: number;
275
+ frequency?: number;
276
+ method?: 'magnitude' | 'snip';
277
+ }) {
278
+ const { start, end, targetSparsity } = cfg;
279
+ if (start < 0 || end < start)
280
+ throw new Error('Invalid pruning schedule window');
281
+ if (targetSparsity <= 0 || targetSparsity >= 1)
282
+ throw new Error('targetSparsity must be in (0,1)');
283
+ this._pruningConfig = {
284
+ start,
285
+ end,
286
+ targetSparsity,
287
+ regrowFraction: cfg.regrowFraction ?? 0,
288
+ frequency: cfg.frequency ?? 1,
289
+ method: cfg.method || 'magnitude',
290
+ lastPruneIter: undefined,
291
+ };
292
+ this._initialConnectionCount = this.connections.length;
293
+ }
294
+ getCurrentSparsity(): number {
295
+ return _getCurrentSparsity.call(this);
296
+ }
297
+ private _maybePrune(iteration: number) {
298
+ return _maybePrune.call(this, iteration);
299
+ }
300
+
301
+ /**
302
+ * Immediately prune connections to reach (or approach) a target sparsity fraction.
303
+ * Used by evolutionary pruning (generation-based) independent of training iteration schedule.
304
+ * @param targetSparsity fraction in (0,1). 0.8 means keep 20% of original (if first call sets baseline)
305
+ * @param method 'magnitude' | 'snip'
306
+ */
307
+ pruneToSparsity(
308
+ targetSparsity: number,
309
+ method: 'magnitude' | 'snip' = 'magnitude'
310
+ ) {
311
+ return _pruneToSparsity.call(this, targetSparsity, method);
312
+ }
313
+
314
+ /** Enable weight noise. Provide a single std dev number or { perHiddenLayer: number[] }. */
315
+ enableWeightNoise(stdDev: number | { perHiddenLayer: number[] }) {
316
+ if (typeof stdDev === 'number') {
317
+ if (stdDev < 0) throw new Error('Weight noise stdDev must be >= 0');
318
+ this._weightNoiseStd = stdDev;
319
+ this._weightNoisePerHidden = [];
320
+ } else if (stdDev && Array.isArray(stdDev.perHiddenLayer)) {
321
+ if (!this.layers || this.layers.length < 3)
322
+ throw new Error(
323
+ 'Per-hidden-layer weight noise requires a layered network with at least one hidden layer'
324
+ );
325
+ const hiddenLayerCount = this.layers.length - 2;
326
+ if (stdDev.perHiddenLayer.length !== hiddenLayerCount)
327
+ throw new Error(
328
+ `Expected ${hiddenLayerCount} std dev entries (one per hidden layer), got ${stdDev.perHiddenLayer.length}`
329
+ );
330
+ if (stdDev.perHiddenLayer.some((s) => s < 0))
331
+ throw new Error('Weight noise std devs must be >= 0');
332
+ this._weightNoiseStd = 0; // disable global
333
+ this._weightNoisePerHidden = stdDev.perHiddenLayer.slice();
334
+ } else {
335
+ throw new Error('Invalid weight noise configuration');
336
+ }
337
+ }
338
+ disableWeightNoise() {
339
+ this._weightNoiseStd = 0;
340
+ this._weightNoisePerHidden = [];
341
+ }
342
+ setWeightNoiseSchedule(fn: (step: number) => number) {
343
+ this._weightNoiseSchedule = fn;
344
+ }
345
+ clearWeightNoiseSchedule() {
346
+ this._weightNoiseSchedule = undefined;
347
+ }
348
+ setRandom(fn: () => number) {
349
+ this._rand = fn;
350
+ }
351
+ setSeed(seed: number) {
352
+ _setSeed.call(this, seed);
353
+ }
354
+ testForceOverflow() {
355
+ this._forceNextOverflow = true;
356
+ }
357
+ get trainingStep() {
358
+ return this._trainingStep;
359
+ }
360
+ get lastSkippedLayers(): number[] {
361
+ return (this as any)._lastSkippedLayers || [];
362
+ }
363
+ snapshotRNG(): any {
364
+ return _snapshotRNG.call(this);
365
+ }
366
+ restoreRNG(fn: () => number) {
367
+ _restoreRNG.call(this, fn);
368
+ }
369
+ getRNGState(): number | undefined {
370
+ return _getRNGState.call(this);
371
+ }
372
+ setRNGState(state: number) {
373
+ _setRNGState.call(this, state);
374
+ }
375
+ setStochasticDepthSchedule(
376
+ fn: (step: number, current: number[]) => number[]
377
+ ) {
378
+ this._stochasticDepthSchedule = fn;
379
+ }
380
+ clearStochasticDepthSchedule() {
381
+ this._stochasticDepthSchedule = undefined;
382
+ }
383
+ getRegularizationStats() {
384
+ return _getRegularizationStats.call(this);
385
+ }
386
+
387
+ /** Configure stochastic depth with survival probabilities per hidden layer (length must match hidden layer count when using layered network). */
388
+ setStochasticDepth(survival: number[]) {
389
+ if (!Array.isArray(survival)) throw new Error('survival must be an array');
390
+ if (survival.some((p) => p <= 0 || p > 1))
391
+ throw new Error('Stochastic depth survival probs must be in (0,1]');
392
+ if (!this.layers || this.layers.length === 0)
393
+ throw new Error('Stochastic depth requires layer-based network');
394
+ // layers includes input and output; hidden layers are layers[1..length-2]
395
+ const hiddenLayerCount = Math.max(0, this.layers.length - 2);
396
+ if (survival.length !== hiddenLayerCount)
397
+ throw new Error(
398
+ `Expected ${hiddenLayerCount} survival probabilities for hidden layers, got ${survival.length}`
399
+ );
400
+ this._stochasticDepth = survival.slice();
401
+ }
402
+ disableStochasticDepth() {
403
+ this._stochasticDepth = [];
404
+ }
405
+
406
+ /**
407
+ * Creates a deep copy of the network.
408
+ * @returns {Network} A new Network instance that is a clone of the current network.
409
+ */
410
+ clone(): Network {
411
+ return Network.fromJSON(this.toJSON());
412
+ }
413
+
414
+ /**
415
+ * Resets all masks in the network to 1 (no dropout). Applies to both node-level and layer-level dropout.
416
+ * Should be called after training to ensure inference is unaffected by previous dropout.
417
+ */
418
+ resetDropoutMasks(): void {
419
+ if (this.layers && this.layers.length > 0) {
420
+ for (const layer of this.layers) {
421
+ if (typeof layer.nodes !== 'undefined') {
422
+ for (const node of layer.nodes) {
423
+ if (typeof node.mask !== 'undefined') node.mask = 1;
424
+ }
425
+ }
426
+ }
427
+ } else {
428
+ for (const node of this.nodes) {
429
+ if (typeof node.mask !== 'undefined') node.mask = 1;
430
+ }
431
+ }
432
+ }
433
+
434
/**
 * Generates standalone source code for this network (no library dependency at runtime).
 * Delegates to the `generateStandalone` helper.
 * @returns Source string produced by the standalone generator.
 */
standalone(): string {
  return generateStandalone(this as any);
}
438
+
439
/**
 * Activates the network using the given input array (forward pass).
 * Returns a plain number[] for backward compatibility; internally a pooled
 * activation array is used and cloned before returning.
 *
 * Three execution paths, chosen in order:
 *  1. Fast slab path — inference-only, ungated, acyclic, no stochastic features.
 *  2. Layered paths — with stochastic depth (layer skipping) or with per-node dropout.
 *  3. Node-based legacy path — per-node activation with dropout / DropConnect / weight noise.
 *
 * @param {number[]} input - Values for the input nodes; length must equal `this.input`.
 * @param {boolean} [training=false] - Enables stochastic features (dropout, weight noise, stochastic depth).
 * @param {number} [maxActivationDepth=1000] - NOTE(review): accepted but not referenced anywhere
 *   in this body — presumably consumed by a delegated path; confirm before relying on it.
 * @returns {number[]} Activations of the output nodes.
 * @throws {Error} On input size mismatch or empty/corrupted node list.
 */
activate(
  input: number[],
  training = false,
  maxActivationDepth = 1000
): number[] {
  // Recompute topological order lazily when acyclicity is enforced and the graph changed.
  if (this._enforceAcyclic && this._topoDirty) this._computeTopoOrder();
  if (!Array.isArray(input) || input.length !== this.input) {
    throw new Error(
      `Input size mismatch: expected ${this.input}, got ${
        input ? input.length : 'undefined'
      }`
    );
  }
  // Fast slab path (inference-only, ungated, acyclic, no stochastic features)
  if (this._canUseFastSlab(training)) {
    try {
      return this._fastSlabActivate(input);
    } catch {
      /* fall back */
    }
  }
  // Acquire pooled activation array for outputs
  const outputArr = activationArrayPool.acquire(this.output);

  // Check for empty or corrupted network structure
  if (!this.nodes || this.nodes.length === 0) {
    throw new Error(
      'Network structure is corrupted or empty. No nodes found.'
    );
  }

  let output: ActivationArray = outputArr;
  (this as any)._lastSkippedLayers = [];
  // Per-call regularization statistics, stored on `_lastStats` at the end.
  // NOTE(review): the `weightNoise` counters (count/sumAbs/maxAbs) are never
  // incremented in this body, so the `meanAbs` computation at the bottom is
  // currently dead code — confirm whether a delegated path fills them.
  const stats = {
    droppedHiddenNodes: 0,
    totalHiddenNodes: 0,
    droppedConnections: 0,
    totalConnections: this.connections.length,
    skippedLayers: [] as number[],
    weightNoise: { count: 0, sumAbs: 0, maxAbs: 0, meanAbs: 0 },
  };
  // Pre-apply weight noise: perturb each connection weight once, remembering the
  // original in `_origWeightNoise` so it can be restored after activation.
  let appliedWeightNoise = false;
  let dynamicStd = this._weightNoiseStd;
  if (training) {
    if (this._weightNoiseSchedule)
      dynamicStd = this._weightNoiseSchedule(this._trainingStep);
    if (dynamicStd > 0 || this._weightNoisePerHidden.length > 0) {
      for (const c of this.connections) {
        // Skip connections already perturbed (e.g. by a nested/recursive call).
        if ((c as any)._origWeightNoise != null) continue;
        (c as any)._origWeightNoise = c.weight;
        let std = dynamicStd;
        // Per-hidden-layer noise override: locate the source node's layer index
        // (linear scan) and map it to a hidden-layer index (layer 0 is input).
        if (this._weightNoisePerHidden.length > 0 && this.layers) {
          let fromLayerIndex = -1;
          for (let li = 0; li < this.layers.length; li++) {
            if (this.layers[li].nodes.includes(c.from)) {
              fromLayerIndex = li;
              break;
            }
          }
          if (fromLayerIndex > 0 && fromLayerIndex < this.layers.length) {
            const hiddenIdx = fromLayerIndex - 1;
            if (
              hiddenIdx >= 0 &&
              hiddenIdx < this._weightNoisePerHidden.length
            )
              std = this._weightNoisePerHidden[hiddenIdx];
          }
        }
        if (std > 0) {
          const noise = std * Network._gaussianRand(this._rand);
          c.weight += noise;
          (c as any)._wnLast = noise;
          appliedWeightNoise = true;
        } else {
          (c as any)._wnLast = 0;
        }
      }
    }
  }
  // Optional stochastic depth schedule update: let the schedule propose new
  // survival probabilities; accept only if shape and range are valid.
  if (
    training &&
    this._stochasticDepthSchedule &&
    this._stochasticDepth.length > 0
  ) {
    const updated = this._stochasticDepthSchedule(
      this._trainingStep,
      this._stochasticDepth.slice()
    );
    if (
      Array.isArray(updated) &&
      updated.length === this._stochasticDepth.length &&
      !updated.some((p) => p <= 0 || p > 1)
    ) {
      this._stochasticDepth = updated.slice();
    }
  }
  if (
    this.layers &&
    this.layers.length > 0 &&
    this._stochasticDepth.length > 0
  ) {
    // Layered activation with stochastic depth: each hidden layer may be skipped
    // (identity pass-through) with probability 1 - survival.
    let acts: number[] | undefined;
    for (let li = 0; li < this.layers.length; li++) {
      const layer = this.layers[li];
      const isHidden = li > 0 && li < this.layers.length - 1;
      let skip = false;
      if (training && isHidden) {
        const hiddenIndex = li - 1;
        if (hiddenIndex < this._stochasticDepth.length) {
          const surviveProb = this._stochasticDepth[hiddenIndex];
          skip = this._rand() >= surviveProb;
          if (skip) {
            // Only skip if size matches previous outputs (identity must be shape-preserving).
            if (!acts || acts.length !== layer.nodes.length) skip = false;
          }
          if (!skip) {
            // Activate (input layer gets input array); scale surviving layer's
            // outputs by 1/p (inverted residual scaling) so expectation matches inference.
            const raw =
              li === 0
                ? layer.activate(input, training)
                : layer.activate(undefined, training);
            acts =
              surviveProb < 1
                ? raw.map((a: number) => a * (1 / surviveProb))
                : raw;
            continue;
          }
        }
      }
      if (skip) {
        (this as any)._lastSkippedLayers.push(li);
        stats.skippedLayers.push(li);
        // identity: acts unchanged
        continue;
      }
      const raw =
        li === 0
          ? layer.activate(input, training)
          : layer.activate(undefined, training);
      acts = raw;
    }
    if (acts) {
      for (let i = 0; i < acts.length && i < this.output; i++)
        output[i] = acts[i];
    }
  } else if (this.layers && this.layers.length > 0) {
    // Layered activation with optional node-level dropout (replicating legacy behavior expected by tests)
    let lastActs: number[] | undefined;
    for (let li = 0; li < this.layers.length; li++) {
      const layer = this.layers[li];
      const isHidden = li > 0 && li < this.layers.length - 1;
      // Always call layer.activate with training=false to avoid its uniform layer-level dropout; we'll handle per-node masks ourselves
      const raw =
        li === 0
          ? layer.activate(input, false)
          : layer.activate(undefined, false);
      // Apply node-level dropout to hidden layers if requested
      if (isHidden && training && this.dropout > 0) {
        let dropped = 0;
        for (const node of layer.nodes) {
          node.mask = this._rand() < this.dropout ? 0 : 1;
          stats.totalHiddenNodes++;
          if (node.mask === 0) stats.droppedHiddenNodes++;
          if (node.mask === 0) {
            node.activation = 0; // zero activation so downstream sees dropout
            dropped++;
          }
        }
        // Safeguard: ensure at least one active node remains
        if (dropped === layer.nodes.length && layer.nodes.length > 0) {
          const idx = Math.floor(this._rand() * layer.nodes.length);
          layer.nodes[idx].mask = 1;
          // Recompute activation for that single node using previous layer outputs
          // Simplified: keep existing raw value captured earlier in raw[idx]
          layer.nodes[idx].activation = raw[idx];
        }
      } else if (isHidden) {
        // Ensure masks are 1 during inference
        for (const node of layer.nodes) node.mask = 1;
      }
      lastActs = raw; // (raw may have been partially zeroed above via node.activation edits; raw array still original but not used after output layer)
    }
    if (lastActs) {
      // NOTE(review): both branches of this conditional are byte-identical —
      // either the `_reuseActivationArrays` distinction was meant to differ
      // (e.g. zero-copy vs clone) or the branch can be collapsed. Confirm intent.
      if (this._reuseActivationArrays) {
        for (let i = 0; i < lastActs.length && i < this.output; i++)
          (output as any)[i] = lastActs[i];
      } else {
        for (let i = 0; i < lastActs.length && i < this.output; i++)
          (output as any)[i] = lastActs[i];
      }
    }
  } else {
    // Node-based activation (legacy, node-level dropout)
    let hiddenNodes = this.nodes.filter((node) => node.type === 'hidden');
    let droppedCount = 0;
    if (training && this.dropout > 0) {
      // Randomly drop hidden nodes
      for (const node of hiddenNodes) {
        node.mask = this._rand() < this.dropout ? 0 : 1;
        stats.totalHiddenNodes++;
        if (node.mask === 0) {
          droppedCount++;
          stats.droppedHiddenNodes++;
        }
      }
      // SAFEGUARD: Ensure at least one hidden node is active
      if (droppedCount === hiddenNodes.length && hiddenNodes.length > 0) {
        // Randomly pick one hidden node to keep active
        const idx = Math.floor(this._rand() * hiddenNodes.length);
        hiddenNodes[idx].mask = 1;
      }
    } else {
      for (const node of hiddenNodes) node.mask = 1;
    }
    // Optional weight noise (apply before node activations to all connection weights, store originals)
    // NOTE(review): `_wnOrig` is allocated but never written/read in this body — likely vestigial.
    if (training && this._weightNoiseStd > 0) {
      if (!this._wnOrig) this._wnOrig = new Array(this.connections.length);
      for (let ci = 0; ci < this.connections.length; ci++) {
        const c = this.connections[ci];
        if ((c as any)._origWeightNoise != null) continue; // already perturbed in recursive call
        (c as any)._origWeightNoise = c.weight;
        const noise =
          this._weightNoiseStd * Network._gaussianRand(this._rand);
        c.weight += noise;
      }
    }
    let outIndex = 0;
    this.nodes.forEach((node, index) => {
      if (node.type === 'input') {
        node.activate(input[index]);
      } else if (node.type === 'output') {
        const activation = node.activate();
        (output as any)[outIndex++] = activation;
      } else {
        node.activate();
      }
    });
    // Apply DropConnect masking to connections post-activation accumulation
    if (training && this._dropConnectProb > 0) {
      for (const conn of this.connections) {
        const mask = this._rand() < this._dropConnectProb ? 0 : 1;
        if (mask === 0) stats.droppedConnections++;
        (conn as any).dcMask = mask;
        if (mask === 0) {
          if ((conn as any)._origWeight == null)
            (conn as any)._origWeight = conn.weight;
          conn.weight = 0;
        } else if ((conn as any)._origWeight != null) {
          conn.weight = (conn as any)._origWeight;
          delete (conn as any)._origWeight;
        }
      }
    } else {
      // restore any temporarily zeroed weights
      for (const conn of this.connections) {
        if ((conn as any)._origWeight != null) {
          conn.weight = (conn as any)._origWeight;
          delete (conn as any)._origWeight;
        }
        (conn as any).dcMask = 1;
      }
    }
    // Restore weight noise
    // NOTE(review): this restore only executes on the node-based path, yet the
    // weight-noise pre-pass above runs for layered networks too — on layered
    // paths perturbed weights appear to remain in place (with `_origWeightNoise`
    // markers left set). Confirm whether a delegated training routine restores them.
    if (training && appliedWeightNoise) {
      for (const c of this.connections) {
        if ((c as any)._origWeightNoise != null) {
          c.weight = (c as any)._origWeightNoise;
          delete (c as any)._origWeightNoise;
        }
      }
    }
  }
  if (training) this._trainingStep++;
  if (stats.weightNoise.count > 0)
    stats.weightNoise.meanAbs =
      stats.weightNoise.sumAbs / stats.weightNoise.count;
  this._lastStats = stats;
  // Clone and release pooled array for backward compatibility
  const result = Array.from(output as any) as number[];
  activationArrayPool.release(output);
  return result;
}
737
+
738
+ private static _gaussianRand(rng: () => number = Math.random): number {
739
+ let u = 0,
740
+ v = 0;
741
+ while (u === 0) u = rng();
742
+ while (v === 0) v = rng();
743
+ return Math.sqrt(-2.0 * Math.log(u)) * Math.cos(2.0 * Math.PI * v);
744
+ }
745
+
746
/**
 * Activates the network without computing eligibility traces — a faster forward
 * pass for testing, evaluation, or deployment where backpropagation is not needed.
 *
 * @param {number[]} input - Values for the input nodes; length must equal `this.input`.
 * @returns {number[]} Activations of the output nodes.
 * @see {@link Node.noTraceActivate}
 */
noTraceActivate(input: number[]): number[] {
  // Lazily load the delegated implementation to avoid a static circular import.
  const activateModule = require('./network/network.activate');
  return activateModule.noTraceActivate.call(this, input);
}
762
+
763
/**
 * Raw activation that may return a pooled typed array directly (zero-copy) when
 * activation-array reuse is enabled; otherwise behaves like `activate()`.
 *
 * @param input Values for the input nodes; length must equal `this.input`.
 * @param training Whether to enable training-time stochastic features.
 * @param maxActivationDepth Maximum activation depth guard forwarded to the implementation.
 */
activateRaw(
  input: number[],
  training = false,
  maxActivationDepth = 1000
): any {
  // Lazily load the delegated implementation to avoid a static circular import.
  const activateModule = require('./network/network.activate');
  return activateModule.activateRaw.call(
    this,
    input,
    training,
    maxActivationDepth
  );
}
775
+
776
/**
 * Activates the network over a batch of input vectors (micro-batching).
 *
 * The delegated implementation currently iterates sample-by-sample while reusing
 * internal fast-path allocations; outputs are cloned number[] arrays for API
 * compatibility. Future versions may vectorize this path.
 *
 * @param inputs Array of input vectors, each of length `this.input`.
 * @param training Whether to run with training-time stochastic features.
 * @returns Array of output vectors, each of length `this.output`.
 */
activateBatch(inputs: number[][], training = false): number[][] {
  // Lazily load the delegated implementation to avoid a static circular import.
  const activateModule = require('./network/network.activate');
  return activateModule.activateBatch.call(this, inputs, training);
}
791
+
792
+ /**
793
+ * Propagates the error backward through the network (backpropagation).
794
+ * Calculates the error gradient for each node and connection.
795
+ * If `update` is true, it adjusts the weights and biases based on the calculated gradients,
796
+ * learning rate, momentum, and optional L2 regularization.
797
+ *
798
+ * The process starts from the output nodes and moves backward layer by layer (or topologically for recurrent nets).
799
+ *
800
+ * @param {number} rate - The learning rate (controls the step size of weight adjustments).
801
+ * @param {number} momentum - The momentum factor (helps overcome local minima and speeds up convergence). Typically between 0 and 1.
802
+ * @param {boolean} update - If true, apply the calculated weight and bias updates. If false, only calculate gradients (e.g., for batch accumulation).
803
+ * @param {number[]} target - An array of target values corresponding to the network's output nodes.
804
+ * The length must match the network's `output` size.
805
+ * @param {number} [regularization=0] - The L2 regularization factor (lambda). Helps prevent overfitting by penalizing large weights.
806
+ * @param {(target: number, output: number) => number} [costDerivative] - Optional derivative of the cost function for output nodes.
807
+ * @throws {Error} If the `target` array length does not match the network's `output` size.
808
+ *
809
+ * @see {@link Node.propagate} for the node-level backpropagation logic.
810
+ */
811
+ propagate(
812
+ rate: number,
813
+ momentum: number,
814
+ update: boolean,
815
+ target: number[],
816
+ regularization: number = 0, // L2 regularization factor (lambda)
817
+ costDerivative?: (target: number, output: number) => number
818
+ ): void {
819
+ // Validate that the target array matches the network's output size.
820
+ if (!target || target.length !== this.output) {
821
+ throw new Error(
822
+ 'Output target length should match network output length'
823
+ );
824
+ }
825
+
826
+ let targetIndex = target.length; // Initialize index for accessing target values in reverse order.
827
+
828
+ // Propagate error starting from the output nodes (last nodes in the `nodes` array).
829
+ // Iterate backward from the last node to the first output node.
830
+ for (
831
+ let i = this.nodes.length - 1;
832
+ i >= this.nodes.length - this.output;
833
+ i--
834
+ ) {
835
+ if (costDerivative) {
836
+ (this.nodes[i] as any).propagate(
837
+ rate,
838
+ momentum,
839
+ update,
840
+ regularization,
841
+ target[--targetIndex],
842
+ costDerivative
843
+ );
844
+ } else {
845
+ this.nodes[i].propagate(
846
+ rate,
847
+ momentum,
848
+ update,
849
+ regularization,
850
+ target[--targetIndex]
851
+ );
852
+ }
853
+ }
854
+
855
+ // Propagate error backward through the hidden nodes.
856
+ // Iterate backward from the last hidden node to the first hidden node.
857
+ for (let i = this.nodes.length - this.output - 1; i >= this.input; i--) {
858
+ this.nodes[i].propagate(rate, momentum, update, regularization); // Pass regularization factor
859
+ }
860
+ }
861
+
862
+ /**
863
+ * Clears the internal state of all nodes in the network.
864
+ * Resets node activation, state, eligibility traces, and extended traces to their initial values (usually 0).
865
+ * This is typically done before processing a new input sequence in recurrent networks or between training epochs if desired.
866
+ *
867
+ * @see {@link Node.clear}
868
+ */
869
+ clear(): void {
870
+ // Iterate through all nodes and call their clear method.
871
+ this.nodes.forEach((node) => node.clear());
872
+ }
873
+
874
/**
 * Mutates the network's structure or parameters using the given mutation method.
 * Core operation for neuro-evolutionary algorithms (NEAT-style). The method
 * should be one of the types defined in `methods.mutation` (e.g. ADD_NODE,
 * MOD_WEIGHT); some carry their own parameters (e.g. MOD_WEIGHT's min/max).
 *
 * @param method Mutation method to apply.
 * @throws {Error} If no valid mutation method is provided (raised by the implementation).
 * @see {@link methods.mutation}
 */
mutate(method: any): void {
  // Lazily load the delegated implementation to avoid a static circular import.
  const mutateModule = require('./network/network.mutate');
  return mutateModule.mutateImpl.call(this, method);
}
889
+
890
/**
 * Creates a connection between two nodes (regular or self-connection) and
 * registers it on the appropriate network list. Delegates to the module-level
 * `_connect` helper bound to this network.
 *
 * @param {Node} from - Source node of the connection.
 * @param {Node} to - Target node of the connection.
 * @param {number} [weight] - Optional weight; when omitted the underlying
 *   `Node.connect` typically assigns a random one.
 * @returns {Connection[]} The newly created connection object(s) — usually one.
 * @see {@link Node.connect}
 */
connect(from: Node, to: Node, weight?: number): Connection[] {
  return _connect.call(this, from, to, weight);
}
905
+
906
/**
 * Gates a connection with the given node: the gater's activation modulates the
 * connection's effective weight. Delegates to the module-level `_gate` helper,
 * which also adds the connection to the network's `gates` list.
 *
 * @param {Node} node - The gater node; must belong to this network.
 * @param {Connection} connection - The connection to gate.
 * @throws {Error} If the node is not part of this network (raised by the implementation).
 * @see {@link Node.gate}
 */
gate(node: Node, connection: Connection) {
  return _gate.call(this, node, connection);
}
921
+
922
/**
 * Removes a node from the network. The delegated `_removeNodeStandalone` helper
 * disconnects its incoming/outgoing/self connections, drops it from `nodes`,
 * attempts to bridge predecessors to successors, and resolves gating that
 * involved the removed node.
 *
 * @param {Node} node - The node instance to remove; must exist in `this.nodes`.
 * @throws {Error} If the node is not found in the network (raised by the implementation).
 */
remove(node: Node) {
  return _removeNodeStandalone.call(this, node);
}
939
+
940
/**
 * Disconnects two nodes, removing the connection between them (handles both
 * regular and self-connections; a gated connection is ungated first).
 * Delegates to the module-level `_disconnect` helper.
 *
 * @param {Node} from - Source node of the connection to remove.
 * @param {Node} to - Target node of the connection to remove.
 * @see {@link Node.disconnect}
 */
disconnect(from: Node, to: Node): void {
  return _disconnect.call(this, from, to);
}
953
+
954
+ // slab rebuild + accessor moved to network.slab.ts
955
+
956
/**
 * Removes the gate from a connection so it is no longer modulated by its gater,
 * and removes it from the network's `gates` list. Delegates to the module-level
 * `_ungate` helper.
 *
 * @param {Connection} connection - The connection to ungate.
 * @throws {Error} If the connection is not in the `gates` list (raised by the implementation).
 * @see {@link Node.ungate}
 */
ungate(connection: Connection) {
  return _ungate.call(this, connection);
}
969
+
970
+ /**
971
+ * Trains the network on a given dataset subset for one pass (epoch or batch).
972
+ * Performs activation and backpropagation for each item in the set.
973
+ * Updates weights based on batch size configuration.
974
+ *
975
+ * @param {{ input: number[]; output: number[] }[]} set - The training dataset subset (e.g., a batch or the full set for one epoch).
976
+ * @param {number} batchSize - The number of samples to process before updating weights.
977
+ * @param {number} currentRate - The learning rate to use for this training pass.
978
+ * @param {number} momentum - The momentum factor to use.
979
+ * @param {any} regularization - The regularization configuration (L1, L2, or custom function).
980
+ * @param {(target: number[], output: number[]) => number} costFunction - The function used to calculate the error between target and output.
981
+ * @returns {number} The average error calculated over the provided dataset subset.
982
+ * @private Internal method used by `train`.
983
+ */
984
+ // Removed legacy _trainSet; delegated to network.training.ts
985
+
986
+ // Gradient clipping implemented in network.training.ts (applyGradientClippingImpl). Kept here only for backward compat if reflection used.
987
+ private _applyGradientClipping(cfg: {
988
+ mode: 'norm' | 'percentile' | 'layerwiseNorm' | 'layerwisePercentile';
989
+ maxNorm?: number;
990
+ percentile?: number;
991
+ }) {
992
+ const { applyGradientClippingImpl } = require('./network/network.training');
993
+ applyGradientClippingImpl(this as any, cfg);
994
+ }
995
+
996
/**
 * Trains the network on the given dataset. The implementation lives in
 * network.training.ts; this wrapper keeps the public API stable.
 *
 * @param set Training samples, each with `input` and `output` arrays.
 * @param options Training options (rate, iterations, cost, batching, etc.).
 * @returns Final error, iteration count, and elapsed time in milliseconds.
 */
train(
  set: { input: number[]; output: number[] }[],
  options: any
): { error: number; iterations: number; time: number } {
  // Lazily load the training module to avoid a static circular import.
  const trainingModule = require('./network/network.training');
  return trainingModule.trainImpl(this as any, set, options);
}
1004
+
1005
/** Returns the last recorded raw (pre-update, pre-clipping) gradient L2 norm. */
getRawGradientNorm(): number {
  return this._lastRawGradNorm;
}
1009
/** Returns the current mixed-precision loss scale (1 when mixed precision is disabled). */
getLossScale(): number {
  return this._mixedPrecision.lossScale;
}
1013
/** Returns the group count from the last gradient-clipping pass (0 if clipping has not run yet). */
getLastGradClipGroupCount(): number {
  return this._lastGradClipGroupCount;
}
1017
/**
 * Returns a consolidated snapshot of training statistics: gradient norms,
 * mixed-precision loss scale, optimizer step, and mixed-precision event
 * counters (good/bad steps, overflows, scale adjustments).
 */
getTrainingStats() {
  const mpState = this._mixedPrecisionState;
  const mixedPrecisionSummary = {
    good: mpState.goodSteps,
    bad: mpState.badSteps,
    overflowCount: mpState.overflowCount || 0,
    scaleUps: mpState.scaleUpEvents || 0,
    scaleDowns: mpState.scaleDownEvents || 0,
    lastOverflowStep: this._lastOverflowStep,
  };
  return {
    gradNorm: this._lastGradNorm ?? 0,
    gradNormRaw: this._lastRawGradNorm,
    lossScale: this._mixedPrecision.lossScale,
    optimizerStep: this._optimizerStep,
    mp: mixedPrecisionSummary,
  };
}
1034
+ /** Utility: adjust rate for accumulation mode (use result when switching to 'sum' to mimic 'average'). */
1035
+ static adjustRateForAccumulation(
1036
+ rate: number,
1037
+ accumulationSteps: number,
1038
+ reduction: 'average' | 'sum'
1039
+ ) {
1040
+ if (reduction === 'sum' && accumulationSteps > 1)
1041
+ return rate / accumulationSteps;
1042
+ return rate;
1043
+ }
1044
+
1045
+ // Evolution wrapper delegates to network/network.evolve.ts implementation.
1046
+ async evolve(
1047
+ set: { input: number[]; output: number[] }[],
1048
+ options: any
1049
+ ): Promise<{ error: number; iterations: number; time: number }> {
1050
+ const { evolveNetwork } = await import('./network/network.evolve');
1051
+ return evolveNetwork.call(this, set, options);
1052
+ }
1053
+
1054
/**
 * Tests the network's performance on a dataset: average error under the given
 * cost function, using `noTraceActivate` (no gradients). Dropout is disabled
 * for the duration of the test and hidden-node masks are reset to 1 so prior
 * training-time dropout cannot affect inference.
 *
 * @param {{ input: number[]; output: number[] }[]} set - Test dataset; each sample's
 *   `input`/`output` lengths must match the network's input/output sizes.
 * @param {function} [cost=methods.Cost.mse] - Cost function; defaults to MSE.
 * @returns {{ error: number; time: number }} Average error and elapsed time (ms).
 * @throws {Error} If the set is empty/not an array or any sample has mismatched sizes.
 */
test(
  set: { input: number[]; output: number[] }[],
  cost?: any
): { error: number; time: number } {
  // Dataset dimension validation
  if (!Array.isArray(set) || set.length === 0) {
    throw new Error('Test set is empty or not an array.');
  }
  for (const sample of set) {
    if (!Array.isArray(sample.input) || sample.input.length !== this.input) {
      throw new Error(
        `Test sample input size mismatch: expected ${this.input}, got ${
          sample.input ? sample.input.length : 'undefined'
        }`
      );
    }
    if (
      !Array.isArray(sample.output) ||
      sample.output.length !== this.output
    ) {
      throw new Error(
        `Test sample output size mismatch: expected ${this.output}, got ${
          sample.output ? sample.output.length : 'undefined'
        }`
      );
    }
  }

  let error = 0; // Accumulator for the total error.
  const costFn = cost || methods.Cost.mse; // Use provided cost function or default to MSE.
  const start = Date.now(); // Start time measurement.

  // --- Dropout/inference transition: Explicitly reset all hidden node masks to 1 for robust inference ---
  this.nodes.forEach((node) => {
    if (node.type === 'hidden') node.mask = 1;
  });

  const previousDropout = this.dropout; // Store current dropout rate
  if (this.dropout > 0) {
    // Temporarily disable dropout effect for testing.
    this.dropout = 0;
  }

  // try/finally ensures the dropout rate is restored even if activation or the
  // cost function throws — previously an exception here would leave dropout
  // permanently disabled for subsequent training.
  try {
    // Iterate through each sample in the test set.
    set.forEach((data) => {
      // Activate the network without calculating traces.
      const output = this.noTraceActivate(data.input);
      // Calculate the error for this sample and add it to the sum.
      error += costFn(data.output, output);
    });
  } finally {
    // Restore the previous dropout rate if it was changed.
    this.dropout = previousDropout;
  }

  // Return the average error and the time taken.
  return { error: error / set.length, time: Date.now() - start };
}
1121
+
1122
/**
 * Lightweight tuple serializer delegating to network.serialize.ts.
 * @returns Array-form serialization suitable for `Network.deserialize`.
 */
serialize(): any[] {
  return _serialize.call(this);
}
1126
+
1127
/**
 * Creates a Network from data produced by `serialize()` (lightweight tuple format).
 * Reconstructs structure and state from the provided arrays; delegates to the
 * module-level `_deserialize` helper.
 *
 * @param {any[]} data - Serialized network data, expected format:
 *   `[activations, states, squashNames, connectionData, inputSize, outputSize]`.
 * @param {number} [inputSize] - Optional input size override.
 * @param {number} [outputSize] - Optional output size override.
 * @returns {Network} A reconstructed Network instance.
 * @static
 */
static deserialize(
  data: any[],
  inputSize?: number,
  outputSize?: number
): Network {
  return _deserialize(data, inputSize, outputSize);
}
1146
+
1147
/**
 * Converts the network to its JSON object representation (latest format,
 * including formatVersion). Serializes only what is needed for full
 * reconstruction — references are by index; runtime-only state (activation,
 * state, traces) is excluded. Delegates to `_toJSONImpl`.
 *
 * @returns {object} A JSON-compatible object representing the network.
 */
toJSON(): object {
  return _toJSONImpl.call(this);
}
1158
+
1159
/**
 * Reconstructs a network from a JSON object produced by `toJSON()` (latest
 * format; handles formatVersion and index-based references). Delegates to
 * `_fromJSONImpl`.
 *
 * @param {object} json - The JSON object representing the network.
 * @returns {Network} The reconstructed network.
 */
static fromJSON(json: any): Network {
  return _fromJSONImpl(json);
}
1169
+
1170
/**
 * Produces an offspring network via NEAT-style crossover of two parents
 * (Instinct paper, Section 2). Matching genes are inherited randomly; disjoint
 * and excess genes come from the fitter parent unless `equal` is true, in
 * which case they are inherited randomly regardless of fitness. Delegates to
 * the module-level `_crossOver` helper.
 *
 * @param {Network} network1 - First parent network.
 * @param {Network} network2 - Second parent network.
 * @param {boolean} [equal=false] - Inherit disjoint/excess genes randomly regardless of fitness.
 * @returns {Network} A new offspring Network.
 * @throws {Error} If parents' input or output sizes differ (raised by the implementation).
 * @see {@link https://medium.com/data-science/neuro-evolution-on-steroids-82bd14ddc2f6}
 * @static
 */
static crossOver(
  network1: Network,
  network2: Network,
  equal: boolean = false
): Network {
  return _crossOver(network1, network2, equal);
}
1196
+
1197
+ /**
1198
+ * Sets specified properties (e.g., bias, squash function) for all nodes in the network.
1199
+ * Useful for initializing or resetting node properties uniformly.
1200
+ *
1201
+ * @param {object} values - An object containing the properties and values to set.
1202
+ * @param {number} [values.bias] - If provided, sets the bias for all nodes.
1203
+ * @param {function} [values.squash] - If provided, sets the squash (activation) function for all nodes.
1204
+ * Should be a valid activation function (e.g., from `methods.Activation`).
1205
+ */
1206
+ set(values: { bias?: number; squash?: any }): void {
1207
+ // Iterate through all nodes in the network.
1208
+ this.nodes.forEach((node) => {
1209
+ // Update bias if provided in the values object.
1210
+ if (typeof values.bias !== 'undefined') {
1211
+ node.bias = values.bias;
1212
+ }
1213
+ // Update squash function if provided.
1214
+ if (typeof values.squash !== 'undefined') {
1215
+ node.squash = values.squash;
1216
+ }
1217
+ });
1218
+ }
1219
+
1220
+ /**
1221
+ * Exports the network to ONNX format (JSON object, minimal MLP support).
1222
+ * Only standard feedforward architectures and standard activations are supported.
1223
+ * Gating, custom activations, and evolutionary features are ignored or replaced with Identity.
1224
+ *
1225
+ * @returns {import('./onnx').OnnxModel} ONNX model as a JSON object.
1226
+ */
1227
+ toONNX() {
1228
+ return exportToONNX(this);
1229
+ }
1230
+
1231
+ /**
1232
+ * Creates a fully connected, strictly layered MLP network.
1233
+ * @param {number} inputCount - Number of input nodes
1234
+ * @param {number[]} hiddenCounts - Array of hidden layer sizes (e.g. [2,3] for two hidden layers)
1235
+ * @param {number} outputCount - Number of output nodes
1236
+ * @returns {Network} A new, fully connected, layered MLP
1237
+ */
1238
+ static createMLP(
1239
+ inputCount: number,
1240
+ hiddenCounts: number[],
1241
+ outputCount: number
1242
+ ): Network {
1243
+ // Create all nodes
1244
+ const inputNodes = Array.from(
1245
+ { length: inputCount },
1246
+ () => new Node('input')
1247
+ );
1248
+ const hiddenLayers: Node[][] = hiddenCounts.map((count) =>
1249
+ Array.from({ length: count }, () => new Node('hidden'))
1250
+ );
1251
+ const outputNodes = Array.from(
1252
+ { length: outputCount },
1253
+ () => new Node('output')
1254
+ );
1255
+ // Flatten all nodes in topological order
1256
+ const allNodes = [...inputNodes, ...hiddenLayers.flat(), ...outputNodes];
1257
+ // Create network instance
1258
+ const net = new Network(inputCount, outputCount);
1259
+ net.nodes = allNodes;
1260
+ // Connect layers
1261
+ let prevLayer = inputNodes;
1262
+ for (const layer of hiddenLayers) {
1263
+ for (const to of layer) {
1264
+ for (const from of prevLayer) {
1265
+ from.connect(to);
1266
+ }
1267
+ }
1268
+ prevLayer = layer;
1269
+ }
1270
+ // Connect last hidden (or input if no hidden) to output
1271
+ for (const to of outputNodes) {
1272
+ for (const from of prevLayer) {
1273
+ from.connect(to);
1274
+ }
1275
+ }
1276
+ // Rebuild net.connections from all per-node connections
1277
+ net.connections = net.nodes.flatMap((n) => n.connections.out);
1278
+ net._topoDirty = true;
1279
+ return net;
1280
+ }
1281
+
1282
+ /**
1283
+ * Rebuilds the network's connections array from all per-node connections.
1284
+ * This ensures that the network.connections array is consistent with the actual
1285
+ * outgoing connections of all nodes. Useful after manual wiring or node manipulation.
1286
+ *
1287
+ * @param {Network} net - The network instance to rebuild connections for.
1288
+ * @returns {void}
1289
+ *
1290
+ * Example usage:
1291
+ * Network.rebuildConnections(net);
1292
+ */
1293
+ static rebuildConnections(net: Network): void {
1294
+ const allConnections = new Set<Connection>();
1295
+ net.nodes.forEach((node) => {
1296
+ node.connections.out.forEach((conn) => {
1297
+ allConnections.add(conn);
1298
+ });
1299
+ });
1300
+ net.connections = Array.from(allConnections) as Connection[];
1301
+ }
1302
+ }