@reicek/neataptic-ts 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272) hide show
  1. package/.github/ISSUE_TEMPLATE/bug_report.md +33 -0
  2. package/.github/ISSUE_TEMPLATE/feature_request.md +27 -0
  3. package/.github/PULL_REQUEST_TEMPLATE.md +28 -0
  4. package/.github/workflows/ci.yml +41 -0
  5. package/.github/workflows/deploy-pages.yml +29 -0
  6. package/.github/workflows/manual_release_pipeline.yml +62 -0
  7. package/.github/workflows/publish.yml +85 -0
  8. package/.github/workflows/release_dispatch.yml +38 -0
  9. package/.travis.yml +5 -0
  10. package/CONTRIBUTING.md +92 -0
  11. package/LICENSE +24 -0
  12. package/ONNX_EXPORT.md +87 -0
  13. package/README.md +1173 -0
  14. package/RELEASE.md +54 -0
  15. package/dist-docs/package.json +1 -0
  16. package/dist-docs/scripts/generate-docs.d.ts +2 -0
  17. package/dist-docs/scripts/generate-docs.d.ts.map +1 -0
  18. package/dist-docs/scripts/generate-docs.js +536 -0
  19. package/dist-docs/scripts/generate-docs.js.map +1 -0
  20. package/dist-docs/scripts/render-docs-html.d.ts +2 -0
  21. package/dist-docs/scripts/render-docs-html.d.ts.map +1 -0
  22. package/dist-docs/scripts/render-docs-html.js +148 -0
  23. package/dist-docs/scripts/render-docs-html.js.map +1 -0
  24. package/docs/FOLDERS.md +14 -0
  25. package/docs/README.md +1173 -0
  26. package/docs/architecture/README.md +1391 -0
  27. package/docs/architecture/index.html +938 -0
  28. package/docs/architecture/network/README.md +1210 -0
  29. package/docs/architecture/network/index.html +908 -0
  30. package/docs/assets/ascii-maze.bundle.js +16542 -0
  31. package/docs/assets/ascii-maze.bundle.js.map +7 -0
  32. package/docs/index.html +1419 -0
  33. package/docs/methods/README.md +670 -0
  34. package/docs/methods/index.html +477 -0
  35. package/docs/multithreading/README.md +274 -0
  36. package/docs/multithreading/index.html +215 -0
  37. package/docs/multithreading/workers/README.md +23 -0
  38. package/docs/multithreading/workers/browser/README.md +39 -0
  39. package/docs/multithreading/workers/browser/index.html +70 -0
  40. package/docs/multithreading/workers/index.html +57 -0
  41. package/docs/multithreading/workers/node/README.md +33 -0
  42. package/docs/multithreading/workers/node/index.html +66 -0
  43. package/docs/neat/README.md +1284 -0
  44. package/docs/neat/index.html +906 -0
  45. package/docs/src/README.md +2659 -0
  46. package/docs/src/index.html +1579 -0
  47. package/jest.config.ts +32 -0
  48. package/package.json +99 -0
  49. package/plans/HyperMorphoNEAT.md +293 -0
  50. package/plans/ONNX_EXPORT_PLAN.md +46 -0
  51. package/scripts/generate-docs.ts +486 -0
  52. package/scripts/render-docs-html.ts +138 -0
  53. package/scripts/types.d.ts +2 -0
  54. package/src/README.md +2659 -0
  55. package/src/architecture/README.md +1391 -0
  56. package/src/architecture/activationArrayPool.ts +135 -0
  57. package/src/architecture/architect.ts +635 -0
  58. package/src/architecture/connection.ts +148 -0
  59. package/src/architecture/group.ts +406 -0
  60. package/src/architecture/layer.ts +804 -0
  61. package/src/architecture/network/README.md +1210 -0
  62. package/src/architecture/network/network.activate.ts +223 -0
  63. package/src/architecture/network/network.connect.ts +157 -0
  64. package/src/architecture/network/network.deterministic.ts +167 -0
  65. package/src/architecture/network/network.evolve.ts +426 -0
  66. package/src/architecture/network/network.gating.ts +186 -0
  67. package/src/architecture/network/network.genetic.ts +247 -0
  68. package/src/architecture/network/network.mutate.ts +624 -0
  69. package/src/architecture/network/network.onnx.ts +463 -0
  70. package/src/architecture/network/network.prune.ts +216 -0
  71. package/src/architecture/network/network.remove.ts +96 -0
  72. package/src/architecture/network/network.serialize.ts +309 -0
  73. package/src/architecture/network/network.slab.ts +262 -0
  74. package/src/architecture/network/network.standalone.ts +246 -0
  75. package/src/architecture/network/network.stats.ts +59 -0
  76. package/src/architecture/network/network.topology.ts +86 -0
  77. package/src/architecture/network/network.training.ts +1278 -0
  78. package/src/architecture/network.ts +1302 -0
  79. package/src/architecture/node.ts +1288 -0
  80. package/src/architecture/onnx.ts +3 -0
  81. package/src/config.ts +83 -0
  82. package/src/methods/README.md +670 -0
  83. package/src/methods/activation.ts +372 -0
  84. package/src/methods/connection.ts +31 -0
  85. package/src/methods/cost.ts +347 -0
  86. package/src/methods/crossover.ts +63 -0
  87. package/src/methods/gating.ts +43 -0
  88. package/src/methods/methods.ts +8 -0
  89. package/src/methods/mutation.ts +300 -0
  90. package/src/methods/rate.ts +257 -0
  91. package/src/methods/selection.ts +65 -0
  92. package/src/multithreading/README.md +274 -0
  93. package/src/multithreading/multi.ts +339 -0
  94. package/src/multithreading/workers/README.md +23 -0
  95. package/src/multithreading/workers/browser/README.md +39 -0
  96. package/src/multithreading/workers/browser/testworker.ts +99 -0
  97. package/src/multithreading/workers/node/README.md +33 -0
  98. package/src/multithreading/workers/node/testworker.ts +72 -0
  99. package/src/multithreading/workers/node/worker.ts +70 -0
  100. package/src/multithreading/workers/workers.ts +22 -0
  101. package/src/neat/README.md +1284 -0
  102. package/src/neat/neat.adaptive.ts +544 -0
  103. package/src/neat/neat.compat.ts +164 -0
  104. package/src/neat/neat.constants.ts +20 -0
  105. package/src/neat/neat.diversity.ts +217 -0
  106. package/src/neat/neat.evaluate.ts +328 -0
  107. package/src/neat/neat.evolve.ts +1026 -0
  108. package/src/neat/neat.export.ts +249 -0
  109. package/src/neat/neat.helpers.ts +235 -0
  110. package/src/neat/neat.lineage.ts +220 -0
  111. package/src/neat/neat.multiobjective.ts +260 -0
  112. package/src/neat/neat.mutation.ts +718 -0
  113. package/src/neat/neat.objectives.ts +157 -0
  114. package/src/neat/neat.pruning.ts +190 -0
  115. package/src/neat/neat.selection.ts +269 -0
  116. package/src/neat/neat.speciation.ts +460 -0
  117. package/src/neat/neat.species.ts +151 -0
  118. package/src/neat/neat.telemetry.exports.ts +469 -0
  119. package/src/neat/neat.telemetry.ts +933 -0
  120. package/src/neat/neat.types.ts +275 -0
  121. package/src/neat.ts +1042 -0
  122. package/src/neataptic.ts +10 -0
  123. package/test/architecture/activationArrayPool.capacity.test.ts +19 -0
  124. package/test/architecture/activationArrayPool.test.ts +46 -0
  125. package/test/architecture/connection.test.ts +290 -0
  126. package/test/architecture/group.test.ts +950 -0
  127. package/test/architecture/layer.test.ts +1535 -0
  128. package/test/architecture/network.pruning.test.ts +65 -0
  129. package/test/architecture/node.test.ts +1602 -0
  130. package/test/examples/asciiMaze/asciiMaze.e2e.test.ts +499 -0
  131. package/test/examples/asciiMaze/asciiMaze.ts +41 -0
  132. package/test/examples/asciiMaze/browser-entry.ts +164 -0
  133. package/test/examples/asciiMaze/browserLogger.ts +221 -0
  134. package/test/examples/asciiMaze/browserTerminalUtility.ts +48 -0
  135. package/test/examples/asciiMaze/colors.ts +119 -0
  136. package/test/examples/asciiMaze/dashboardManager.ts +968 -0
  137. package/test/examples/asciiMaze/evolutionEngine.ts +1248 -0
  138. package/test/examples/asciiMaze/fitness.ts +136 -0
  139. package/test/examples/asciiMaze/index.html +128 -0
  140. package/test/examples/asciiMaze/index.ts +26 -0
  141. package/test/examples/asciiMaze/interfaces.ts +235 -0
  142. package/test/examples/asciiMaze/mazeMovement.ts +996 -0
  143. package/test/examples/asciiMaze/mazeUtils.ts +278 -0
  144. package/test/examples/asciiMaze/mazeVision.ts +402 -0
  145. package/test/examples/asciiMaze/mazeVisualization.ts +585 -0
  146. package/test/examples/asciiMaze/mazes.ts +245 -0
  147. package/test/examples/asciiMaze/networkRefinement.ts +76 -0
  148. package/test/examples/asciiMaze/networkVisualization.ts +901 -0
  149. package/test/examples/asciiMaze/terminalUtility.ts +73 -0
  150. package/test/methods/activation.test.ts +1142 -0
  151. package/test/methods/connection.test.ts +146 -0
  152. package/test/methods/cost.test.ts +1123 -0
  153. package/test/methods/crossover.test.ts +202 -0
  154. package/test/methods/gating.test.ts +144 -0
  155. package/test/methods/mutation.test.ts +451 -0
  156. package/test/methods/optimizers.advanced.test.ts +80 -0
  157. package/test/methods/optimizers.behavior.test.ts +105 -0
  158. package/test/methods/optimizers.formula.test.ts +89 -0
  159. package/test/methods/rate.cosineWarmRestarts.test.ts +44 -0
  160. package/test/methods/rate.linearWarmupDecay.test.ts +41 -0
  161. package/test/methods/rate.reduceOnPlateau.test.ts +45 -0
  162. package/test/methods/rate.test.ts +684 -0
  163. package/test/methods/selection.test.ts +245 -0
  164. package/test/multithreading/activations.functions.test.ts +54 -0
  165. package/test/multithreading/multi.test.ts +290 -0
  166. package/test/multithreading/worker.node.process.test.ts +39 -0
  167. package/test/multithreading/workers.coverage.test.ts +36 -0
  168. package/test/multithreading/workers.dynamic.import.test.ts +8 -0
  169. package/test/neat/neat.adaptive.complexityBudget.test.ts +34 -0
  170. package/test/neat/neat.adaptive.criterion.complexity.test.ts +50 -0
  171. package/test/neat/neat.adaptive.mutation.strategy.test.ts +37 -0
  172. package/test/neat/neat.adaptive.operator.decay.test.ts +31 -0
  173. package/test/neat/neat.adaptive.phasedComplexity.test.ts +25 -0
  174. package/test/neat/neat.adaptive.pruning.test.ts +25 -0
  175. package/test/neat/neat.adaptive.targetSpecies.test.ts +43 -0
  176. package/test/neat/neat.additional.coverage.test.ts +126 -0
  177. package/test/neat/neat.advanced.enhancements.test.ts +85 -0
  178. package/test/neat/neat.advanced.test.ts +589 -0
  179. package/test/neat/neat.diversity.autocompat.test.ts +47 -0
  180. package/test/neat/neat.diversity.metrics.test.ts +21 -0
  181. package/test/neat/neat.diversity.stats.test.ts +44 -0
  182. package/test/neat/neat.enhancements.test.ts +79 -0
  183. package/test/neat/neat.entropy.ancestorAdaptive.test.ts +133 -0
  184. package/test/neat/neat.entropy.compat.csv.test.ts +108 -0
  185. package/test/neat/neat.evolution.pruning.test.ts +39 -0
  186. package/test/neat/neat.fastmode.autotune.test.ts +42 -0
  187. package/test/neat/neat.innovation.test.ts +134 -0
  188. package/test/neat/neat.lineage.antibreeding.test.ts +35 -0
  189. package/test/neat/neat.lineage.entropy.test.ts +56 -0
  190. package/test/neat/neat.lineage.inbreeding.test.ts +49 -0
  191. package/test/neat/neat.lineage.pressure.test.ts +29 -0
  192. package/test/neat/neat.multiobjective.adaptive.test.ts +57 -0
  193. package/test/neat/neat.multiobjective.dynamic.schedule.test.ts +46 -0
  194. package/test/neat/neat.multiobjective.dynamic.test.ts +31 -0
  195. package/test/neat/neat.multiobjective.fastsort.delegation.test.ts +51 -0
  196. package/test/neat/neat.multiobjective.prune.test.ts +39 -0
  197. package/test/neat/neat.multiobjective.test.ts +21 -0
  198. package/test/neat/neat.mutation.undefined.pool.test.ts +24 -0
  199. package/test/neat/neat.objective.events.test.ts +26 -0
  200. package/test/neat/neat.objective.importance.test.ts +21 -0
  201. package/test/neat/neat.objective.lifetimes.test.ts +33 -0
  202. package/test/neat/neat.offspring.allocation.test.ts +22 -0
  203. package/test/neat/neat.operator.bandit.test.ts +17 -0
  204. package/test/neat/neat.operator.phases.test.ts +38 -0
  205. package/test/neat/neat.pruneInactive.behavior.test.ts +54 -0
  206. package/test/neat/neat.reenable.adaptation.test.ts +18 -0
  207. package/test/neat/neat.rng.state.test.ts +22 -0
  208. package/test/neat/neat.spawn.add.test.ts +123 -0
  209. package/test/neat/neat.speciation.test.ts +96 -0
  210. package/test/neat/neat.species.allocation.telemetry.test.ts +26 -0
  211. package/test/neat/neat.species.history.csv.test.ts +24 -0
  212. package/test/neat/neat.telemetry.advanced.test.ts +226 -0
  213. package/test/neat/neat.telemetry.csv.lineage.test.ts +19 -0
  214. package/test/neat/neat.telemetry.parity.test.ts +42 -0
  215. package/test/neat/neat.telemetry.stream.test.ts +19 -0
  216. package/test/neat/neat.telemetry.test.ts +16 -0
  217. package/test/neat/neat.test.ts +422 -0
  218. package/test/neat/neat.utilities.test.ts +44 -0
  219. package/test/network/__suppress_console.ts +9 -0
  220. package/test/network/acyclic.topoorder.test.ts +17 -0
  221. package/test/network/checkpoint.metricshook.test.ts +36 -0
  222. package/test/network/error.handling.test.ts +581 -0
  223. package/test/network/evolution.test.ts +285 -0
  224. package/test/network/genetic.test.ts +208 -0
  225. package/test/network/learning.capability.test.ts +244 -0
  226. package/test/network/mutation.effects.test.ts +492 -0
  227. package/test/network/network.activate.test.ts +115 -0
  228. package/test/network/network.activateBatch.test.ts +30 -0
  229. package/test/network/network.deterministic.test.ts +64 -0
  230. package/test/network/network.evolve.branches.test.ts +75 -0
  231. package/test/network/network.evolve.multithread.branches.test.ts +83 -0
  232. package/test/network/network.evolve.test.ts +100 -0
  233. package/test/network/network.gating.removal.test.ts +93 -0
  234. package/test/network/network.mutate.additional.test.ts +145 -0
  235. package/test/network/network.mutate.edgecases.test.ts +101 -0
  236. package/test/network/network.mutate.test.ts +101 -0
  237. package/test/network/network.prune.earlyexit.test.ts +38 -0
  238. package/test/network/network.remove.errors.test.ts +45 -0
  239. package/test/network/network.slab.fallbacks.test.ts +22 -0
  240. package/test/network/network.stats.test.ts +45 -0
  241. package/test/network/network.training.advanced.test.ts +149 -0
  242. package/test/network/network.training.basic.test.ts +228 -0
  243. package/test/network/network.training.helpers.test.ts +183 -0
  244. package/test/network/onnx.export.test.ts +310 -0
  245. package/test/network/onnx.import.test.ts +129 -0
  246. package/test/network/pruning.topology.test.ts +282 -0
  247. package/test/network/regularization.determinism.test.ts +83 -0
  248. package/test/network/regularization.dropconnect.test.ts +17 -0
  249. package/test/network/regularization.dropconnect.validation.test.ts +18 -0
  250. package/test/network/regularization.stochasticdepth.test.ts +27 -0
  251. package/test/network/regularization.test.ts +843 -0
  252. package/test/network/regularization.weightnoise.test.ts +30 -0
  253. package/test/network/setupTests.ts +2 -0
  254. package/test/network/standalone.test.ts +332 -0
  255. package/test/network/structure.serialization.test.ts +660 -0
  256. package/test/training/training.determinism.mixed-precision.test.ts +134 -0
  257. package/test/training/training.earlystopping.test.ts +91 -0
  258. package/test/training/training.edge-cases.test.ts +91 -0
  259. package/test/training/training.extensions.test.ts +47 -0
  260. package/test/training/training.gradient.features.test.ts +110 -0
  261. package/test/training/training.gradient.refinements.test.ts +170 -0
  262. package/test/training/training.gradient.separate-bias.test.ts +41 -0
  263. package/test/training/training.optimizer.test.ts +48 -0
  264. package/test/training/training.plateau.smoothing.test.ts +58 -0
  265. package/test/training/training.smoothing.types.test.ts +174 -0
  266. package/test/training/training.train.options.coverage.test.ts +52 -0
  267. package/test/utils/console-helper.ts +76 -0
  268. package/test/utils/jest-setup.ts +60 -0
  269. package/test/utils/test-helpers.ts +175 -0
  270. package/tsconfig.docs.json +12 -0
  271. package/tsconfig.json +21 -0
  272. package/webpack.config.js +49 -0
@@ -0,0 +1,670 @@
1
+ # methods
2
+
3
+ ## methods/activation.ts
4
+
5
+ ### Activation
6
+
7
+ ### registerCustomActivation
8
+
9
+ `(name: string, fn: (x: number, derivate?: boolean | undefined) => number) => void`
10
+
11
+ Register a custom activation function at runtime.
12
+
13
+ Parameters:
14
+ - `name` - Name for the custom activation.
15
+ - `fn` - The activation function (should handle derivative if needed).
16
+
17
+ ## methods/connection.ts
18
+
19
+ ### connection
20
+
21
+ Export the connection object as the default export.
22
+
23
+ ### groupConnection
24
+
25
+ ## methods/cost.ts
26
+
27
+ ### cost
28
+
29
+ ### default
30
+
31
+ #### binary
32
+
33
+ `(targets: number[], outputs: number[]) => number`
34
+
35
+ Calculates the Binary Error rate, often used as a simple accuracy metric for classification.
36
+
37
+ This function calculates the proportion of misclassifications by comparing the
38
+ rounded network outputs (thresholded at 0.5) against the target labels.
39
+ It assumes target values are 0 or 1, and outputs are probabilities between 0 and 1.
40
+ Note: This is equivalent to `1 - accuracy` for binary classification.
41
+
42
+ Parameters:
43
+ - `targets` - An array of target values, expected to be 0 or 1.
44
+ - `outputs` - An array of output values from the network, typically probabilities between 0 and 1.
45
+
46
+ Returns: The proportion of misclassified samples (error rate, between 0 and 1).
47
+
48
+ #### crossEntropy
49
+
50
+ `(targets: number[], outputs: number[]) => number`
51
+
52
+ Calculates the Cross Entropy error, commonly used for classification tasks.
53
+
54
+ This function measures the performance of a classification model whose output is
55
+ a probability value between 0 and 1. Cross-entropy loss increases as the
56
+ predicted probability diverges from the actual label.
57
+
58
+ It uses a small epsilon (PROB_EPSILON = 1e-15) to prevent `log(0)` which would result in `NaN`.
59
+ Output values are clamped to the range `[epsilon, 1 - epsilon]` for numerical stability.
60
+
61
+ Parameters:
62
+ - `targets` - An array of target values, typically 0 or 1 for binary classification, or probabilities for soft labels.
63
+ - `outputs` - An array of output values from the network, representing probabilities (expected to be between 0 and 1).
64
+
65
+ Returns: The mean cross-entropy error over all samples.
66
+
67
+ #### focalLoss
68
+
69
+ `(targets: number[], outputs: number[], gamma: number, alpha: number) => number`
70
+
71
+ Calculates the Focal Loss, which is useful for addressing class imbalance in classification tasks.
72
+ Focal loss down-weights easy examples and focuses training on hard negatives.
73
+
74
+ Parameters:
75
+ - `targets` - Array of target values (0 or 1 for binary, or probabilities for soft labels).
76
+ - `outputs` - Array of predicted probabilities (between 0 and 1).
77
+ - `gamma` - Focusing parameter (default 2).
78
+ - `alpha` - Balancing parameter (default 0.25).
79
+
80
+ Returns: The mean focal loss.
81
+
82
+ #### hinge
83
+
84
+ `(targets: number[], outputs: number[]) => number`
85
+
86
+ Calculates the Mean Hinge loss, primarily used for "maximum-margin" classification,
87
+ most notably for Support Vector Machines (SVMs).
88
+
89
+ Hinge loss is used for training classifiers. It penalizes predictions that are
90
+ not only incorrect but also those that are correct but not confident (i.e., close to the decision boundary).
91
+ Assumes target values are encoded as -1 or 1.
92
+
93
+ Parameters:
94
+ - `targets` - An array of target values, expected to be -1 or 1.
95
+ - `outputs` - An array of output values from the network (raw scores, not necessarily probabilities).
96
+
97
+ Returns: The mean hinge loss.
98
+
99
+ #### labelSmoothing
100
+
101
+ `(targets: number[], outputs: number[], smoothing: number) => number`
102
+
103
+ Calculates the Cross Entropy with Label Smoothing.
104
+ Label smoothing prevents the model from becoming overconfident by softening the targets.
105
+
106
+ Parameters:
107
+ - `targets` - Array of target values (0 or 1 for binary, or probabilities for soft labels).
108
+ - `outputs` - Array of predicted probabilities (between 0 and 1).
109
+ - `smoothing` - Smoothing factor (between 0 and 1, e.g., 0.1).
110
+
111
+ Returns: The mean cross-entropy loss with label smoothing.
112
+
113
+ #### mae
114
+
115
+ `(targets: number[], outputs: number[]) => number`
116
+
117
+ Calculates the Mean Absolute Error (MAE), another common loss function for regression tasks.
118
+
119
+ MAE measures the average of the absolute differences between predictions and actual values.
120
+ Compared to MSE, it is less sensitive to outliers because errors are not squared.
121
+
122
+ Parameters:
123
+ - `targets` - An array of target numerical values.
124
+ - `outputs` - An array of output values from the network.
125
+
126
+ Returns: The mean absolute error.
127
+
128
+ #### mape
129
+
130
+ `(targets: number[], outputs: number[]) => number`
131
+
132
+ Calculates the Mean Absolute Percentage Error (MAPE).
133
+
134
+ MAPE expresses the error as a percentage of the actual value. It can be useful
135
+ for understanding the error relative to the magnitude of the target values.
136
+ However, it has limitations: it's undefined when the target value is zero and
137
+ can be skewed by target values close to zero.
138
+
139
+ Parameters:
140
+ - `targets` - An array of target numerical values. Should not contain zeros for standard MAPE.
141
+ - `outputs` - An array of output values from the network.
142
+
143
+ Returns: The mean absolute percentage error, expressed as a proportion (e.g., 0.1 for 10%).
144
+
145
+ #### mse
146
+
147
+ `(targets: number[], outputs: number[]) => number`
148
+
149
+ Calculates the Mean Squared Error (MSE), a common loss function for regression tasks.
150
+
151
+ MSE measures the average of the squares of the errors—that is, the average
152
+ squared difference between the estimated values and the actual value.
153
+ It is sensitive to outliers due to the squaring of the error terms.
154
+
155
+ Parameters:
156
+ - `targets` - An array of target numerical values.
157
+ - `outputs` - An array of output values from the network.
158
+
159
+ Returns: The mean squared error.
160
+
161
+ #### msle
162
+
163
+ `(targets: number[], outputs: number[]) => number`
164
+
165
+ Calculates the Mean Squared Logarithmic Error (MSLE).
166
+
167
+ MSLE is often used in regression tasks where the target values span a large range
168
+ or when penalizing under-predictions more than over-predictions is desired.
169
+ It measures the squared difference between the logarithms of the predicted and actual values.
170
+ Uses `log(1 + x)` instead of `log(x)` for numerical stability and to handle inputs of 0.
171
+ Assumes both targets and outputs are non-negative.
172
+
173
+ Parameters:
174
+ - `targets` - An array of target numerical values (assumed >= 0).
175
+ - `outputs` - An array of output values from the network (assumed >= 0).
176
+
177
+ Returns: The mean squared logarithmic error.
178
+
179
+ #### softmaxCrossEntropy
180
+
181
+ `(targets: number[], outputs: number[]) => number`
182
+
183
+ Softmax Cross Entropy for mutually exclusive multi-class outputs given raw (pre-softmax or arbitrary) scores.
184
+ Applies a numerically stable softmax to the outputs internally then computes -sum(target * log(prob)).
185
+ Targets may be soft labels and are expected to sum to 1 (will be re-normalized if not).
186
+
187
+ ## methods/crossover.ts
188
+
189
+ ### crossover
190
+
191
+ ## methods/gating.ts
192
+
193
+ ### gating
194
+
195
+ ## methods/methods.ts
196
+
197
+ ### Activation
198
+
199
+ ### crossover
200
+
201
+ ### gating
202
+
203
+ ### groupConnection
204
+
205
+ ### methods
206
+
207
+ Provides various methods for implementing learning rate schedules.
208
+
209
+ Learning rate schedules dynamically adjust the learning rate during the training
210
+ process of machine learning models, particularly neural networks. Adjusting the
211
+ learning rate can significantly impact training speed and performance. A high
212
+ rate might lead to overshooting the optimal solution, while a very low rate
213
+ can result in slow convergence or getting stuck in local minima. These methods
214
+ offer different strategies to balance exploration and exploitation during training.
215
+
216
+ ### mutation
217
+
218
+ ### selection
219
+
220
+ ### default
221
+
222
+ #### binary
223
+
224
+ `(targets: number[], outputs: number[]) => number`
225
+
226
+ Calculates the Binary Error rate, often used as a simple accuracy metric for classification.
227
+
228
+ This function calculates the proportion of misclassifications by comparing the
229
+ rounded network outputs (thresholded at 0.5) against the target labels.
230
+ It assumes target values are 0 or 1, and outputs are probabilities between 0 and 1.
231
+ Note: This is equivalent to `1 - accuracy` for binary classification.
232
+
233
+ Parameters:
234
+ - `targets` - An array of target values, expected to be 0 or 1.
235
+ - `outputs` - An array of output values from the network, typically probabilities between 0 and 1.
236
+
237
+ Returns: The proportion of misclassified samples (error rate, between 0 and 1).
238
+
239
+ #### cosineAnnealing
240
+
241
+ `(period: number, minRate: number) => (baseRate: number, iteration: number) => number`
242
+
243
+ Implements a Cosine Annealing learning rate schedule.
244
+
245
+ This schedule varies the learning rate cyclically according to a cosine function.
246
+ It starts at the `baseRate` and smoothly anneals down to `minRate` over a
247
+ specified `period` of iterations, then potentially repeats. This can help
248
+ the model escape local minima and explore the loss landscape more effectively.
249
+ Often used with "warm restarts" where the cycle repeats.
250
+
251
+ Formula: `learning_rate = minRate + 0.5 * (baseRate - minRate) * (1 + cos(pi * current_cycle_iteration / period))`
252
+
253
+ Parameters:
254
+ - `period` - The number of iterations over which the learning rate anneals from `baseRate` to `minRate` in one cycle. Defaults to 1000.
255
+ - `minRate` - The minimum learning rate value at the end of a cycle. Defaults to 0.
256
+ - `baseRate` - The initial (maximum) learning rate for the cycle.
257
+ - `iteration` - The current training iteration.
258
+
259
+ Returns: A function that calculates the learning rate for a given iteration based on the cosine annealing schedule.
260
+
261
+ #### cosineAnnealingWarmRestarts
262
+
263
+ `(initialPeriod: number, minRate: number, tMult: number) => (baseRate: number, iteration: number) => number`
264
+
265
+ Cosine Annealing with Warm Restarts (SGDR style) where the cycle length can grow by a multiplier (tMult) after each restart.
266
+
267
+ Parameters:
268
+ - `initialPeriod` - Length of the first cycle in iterations.
269
+ - `minRate` - Minimum learning rate at valley.
270
+ - `tMult` - Factor to multiply the period after each restart (>=1).
271
+
272
+ #### crossEntropy
273
+
274
+ `(targets: number[], outputs: number[]) => number`
275
+
276
+ Calculates the Cross Entropy error, commonly used for classification tasks.
277
+
278
+ This function measures the performance of a classification model whose output is
279
+ a probability value between 0 and 1. Cross-entropy loss increases as the
280
+ predicted probability diverges from the actual label.
281
+
282
+ It uses a small epsilon (PROB_EPSILON = 1e-15) to prevent `log(0)` which would result in `NaN`.
283
+ Output values are clamped to the range `[epsilon, 1 - epsilon]` for numerical stability.
284
+
285
+ Parameters:
286
+ - `targets` - An array of target values, typically 0 or 1 for binary classification, or probabilities for soft labels.
287
+ - `outputs` - An array of output values from the network, representing probabilities (expected to be between 0 and 1).
288
+
289
+ Returns: The mean cross-entropy error over all samples.
290
+
291
+ #### exp
292
+
293
+ `(gamma: number) => (baseRate: number, iteration: number) => number`
294
+
295
+ Implements an exponential decay learning rate schedule.
296
+
297
+ The learning rate decreases exponentially after each iteration, multiplying
298
+ by the decay factor `gamma`. This provides a smooth, continuous reduction
299
+ in the learning rate over time.
300
+
301
+ Formula: `learning_rate = baseRate * gamma ^ iteration`
302
+
303
+ Parameters:
304
+ - `gamma` - The decay factor applied at each iteration. Should be less than 1. Defaults to 0.999.
305
+ - `baseRate` - The initial learning rate.
306
+ - `iteration` - The current training iteration.
307
+
308
+ Returns: A function that calculates the exponentially decayed learning rate for a given iteration.
309
+
310
+ #### fixed
311
+
312
+ `() => (baseRate: number, iteration: number) => number`
313
+
314
+ Implements a fixed learning rate schedule.
315
+
316
+ The learning rate remains constant throughout the entire training process.
317
+ This is the simplest schedule and serves as a baseline, but may not be
318
+ optimal for complex problems.
319
+
320
+ Parameters:
321
+ - `baseRate` - The initial learning rate, which will remain constant.
322
+ - `iteration` - The current training iteration (unused in this method, but included for consistency).
323
+
324
+ Returns: A function that takes the base learning rate and the current iteration number, and always returns the base learning rate.
325
+
326
+ #### focalLoss
327
+
328
+ `(targets: number[], outputs: number[], gamma: number, alpha: number) => number`
329
+
330
+ Calculates the Focal Loss, which is useful for addressing class imbalance in classification tasks.
331
+ Focal loss down-weights easy examples and focuses training on hard negatives.
332
+
333
+ Parameters:
334
+ - `targets` - Array of target values (0 or 1 for binary, or probabilities for soft labels).
335
+ - `outputs` - Array of predicted probabilities (between 0 and 1).
336
+ - `gamma` - Focusing parameter (default 2).
337
+ - `alpha` - Balancing parameter (default 0.25).
338
+
339
+ Returns: The mean focal loss.
340
+
341
+ #### hinge
342
+
343
+ `(targets: number[], outputs: number[]) => number`
344
+
345
+ Calculates the Mean Hinge loss, primarily used for "maximum-margin" classification,
346
+ most notably for Support Vector Machines (SVMs).
347
+
348
+ Hinge loss is used for training classifiers. It penalizes predictions that are
349
+ not only incorrect but also those that are correct but not confident (i.e., close to the decision boundary).
350
+ Assumes target values are encoded as -1 or 1.
351
+
352
+ Parameters:
353
+ - `targets` - An array of target values, expected to be -1 or 1.
354
+ - `outputs` - An array of output values from the network (raw scores, not necessarily probabilities).
355
+
356
+ Returns: The mean hinge loss.
357
+
358
+ #### inv
359
+
360
+ `(gamma: number, power: number) => (baseRate: number, iteration: number) => number`
361
+
362
+ Implements an inverse decay learning rate schedule.
363
+
364
+ The learning rate decreases as the inverse of the iteration number,
365
+ controlled by the decay factor `gamma` and exponent `power`. The rate
366
+ decreases more slowly over time compared to exponential decay.
367
+
368
+ Formula: `learning_rate = baseRate / (1 + gamma * Math.pow(iteration, power))`
369
+
370
+ Parameters:
371
+ - `gamma` - Controls the rate of decay. Higher values lead to faster decay. Defaults to 0.001.
372
+ - `power` - The exponent controlling the shape of the decay curve. Defaults to 2.
373
+ - `baseRate` - The initial learning rate.
374
+ - `iteration` - The current training iteration.
375
+
376
+ Returns: A function that calculates the inversely decayed learning rate for a given iteration.
377
+
378
+ #### labelSmoothing
379
+
380
+ `(targets: number[], outputs: number[], smoothing: number) => number`
381
+
382
+ Calculates the Cross Entropy with Label Smoothing.
383
+ Label smoothing prevents the model from becoming overconfident by softening the targets.
384
+
385
+ Parameters:
386
+ - `targets` - Array of target values (0 or 1 for binary, or probabilities for soft labels).
386
+ - `outputs` - Array of predicted probabilities (between 0 and 1).
387
+ - `smoothing` - Smoothing factor (between 0 and 1, e.g., 0.1).
389
+
390
+ Returns: The mean cross-entropy loss with label smoothing.
391
+
392
+ #### linearWarmupDecay
393
+
394
+ `(totalSteps: number, warmupSteps: number | undefined, endRate: number) => (baseRate: number, iteration: number) => number`
395
+
396
+ Linear Warmup followed by Linear Decay to an end rate.
397
+ Warmup linearly increases LR from near 0 up to baseRate over warmupSteps, then linearly decays to endRate at totalSteps.
398
+ Iterations beyond totalSteps clamp to endRate.
399
+
400
+ Parameters:
401
+ - `totalSteps` - Total steps for full schedule (must be > 0).
402
+ - `warmupSteps` - Steps for warmup (< totalSteps). Defaults to 10% of totalSteps.
403
+ - `endRate` - Final rate at totalSteps.
404
+
405
+ #### mae
406
+
407
+ `(targets: number[], outputs: number[]) => number`
408
+
409
+ Calculates the Mean Absolute Error (MAE), another common loss function for regression tasks.
410
+
411
+ MAE measures the average of the absolute differences between predictions and actual values.
412
+ Compared to MSE, it is less sensitive to outliers because errors are not squared.
413
+
414
+ Parameters:
415
+ - `targets` - An array of target numerical values.
415
+ - `outputs` - An array of output values from the network.
417
+
418
+ Returns: The mean absolute error.
419
+
420
+ #### mape
421
+
422
+ `(targets: number[], outputs: number[]) => number`
423
+
424
+ Calculates the Mean Absolute Percentage Error (MAPE).
425
+
426
+ MAPE expresses the error as a percentage of the actual value. It can be useful
427
+ for understanding the error relative to the magnitude of the target values.
428
+ However, it has limitations: it's undefined when the target value is zero and
429
+ can be skewed by target values close to zero.
430
+
431
+ Parameters:
432
+ - `targets` - An array of target numerical values. Should not contain zeros for standard MAPE.
432
+ - `outputs` - An array of output values from the network.
434
+
435
+ Returns: The mean absolute percentage error, expressed as a proportion (e.g., 0.1 for 10%).
436
+
437
+ #### mse
438
+
439
+ `(targets: number[], outputs: number[]) => number`
440
+
441
+ Calculates the Mean Squared Error (MSE), a common loss function for regression tasks.
442
+
443
+ MSE measures the average of the squares of the errors—that is, the average
444
+ squared difference between the estimated values and the actual value.
445
+ It is sensitive to outliers due to the squaring of the error terms.
446
+
447
+ Parameters:
448
+ - `targets` - An array of target numerical values.
448
+ - `outputs` - An array of output values from the network.
450
+
451
+ Returns: The mean squared error.
452
+
453
+ #### msle
454
+
455
+ `(targets: number[], outputs: number[]) => number`
456
+
457
+ Calculates the Mean Squared Logarithmic Error (MSLE).
458
+
459
+ MSLE is often used in regression tasks where the target values span a large range
460
+ or when penalizing under-predictions more than over-predictions is desired.
461
+ It measures the squared difference between the logarithms of the predicted and actual values.
462
+ Uses `log(1 + x)` instead of `log(x)` for numerical stability and to handle inputs of 0.
463
+ Assumes both targets and outputs are non-negative.
464
+
465
+ Parameters:
466
+ - `targets` - An array of target numerical values (assumed >= 0).
466
+ - `outputs` - An array of output values from the network (assumed >= 0).
468
+
469
+ Returns: The mean squared logarithmic error.
470
+
471
+ #### reduceOnPlateau
472
+
473
+ `(options: { factor?: number | undefined; patience?: number | undefined; minDelta?: number | undefined; cooldown?: number | undefined; minRate?: number | undefined; verbose?: boolean | undefined; } | undefined) => (baseRate: number, iteration: number, lastError?: number | undefined) => number`
474
+
475
+ ReduceLROnPlateau style scheduler (stateful closure) that monitors error signal (third argument if provided)
476
+ and reduces rate by 'factor' if no improvement beyond 'minDelta' for 'patience' iterations.
477
+ Cooldown prevents immediate successive reductions.
478
+ NOTE: Requires the training loop to call with signature (baseRate, iteration, lastError).
479
+
480
+ #### softmaxCrossEntropy
481
+
482
+ `(targets: number[], outputs: number[]) => number`
483
+
484
+ Softmax Cross Entropy for mutually exclusive multi-class outputs given raw (pre-softmax or arbitrary) scores.
485
+ Applies a numerically stable softmax to the outputs internally then computes -sum(target * log(prob)).
486
+ Targets may be soft labels and are expected to sum to 1 (will be re-normalized if not).
487
+
488
+ #### step
489
+
490
+ `(gamma: number, stepSize: number) => (baseRate: number, iteration: number) => number`
491
+
492
+ Implements a step decay learning rate schedule.
493
+
494
+ The learning rate is reduced by a multiplicative factor (`gamma`)
495
+ at predefined intervals (`stepSize` iterations). This allows for
496
+ faster initial learning, followed by finer adjustments as training progresses.
497
+
498
+ Formula: `learning_rate = baseRate * gamma ^ floor(iteration / stepSize)`
499
+
500
+ Parameters:
501
+ - `gamma` - The factor by which the learning rate is multiplied at each step. Should be less than 1. Defaults to 0.9.
502
+ - `stepSize` - The number of iterations after which the learning rate decays. Defaults to 100.
503
+ - `baseRate` - The initial learning rate.
504
+ - `iteration` - The current training iteration.
505
+
506
+ Returns: A function that calculates the decayed learning rate for a given iteration.
507
+
508
+ ## methods/mutation.ts
509
+
510
+ ### mutation
511
+
512
+ ## methods/rate.ts
513
+
514
+ ### rate
515
+
516
+ Provides various methods for implementing learning rate schedules.
517
+
518
+ Learning rate schedules dynamically adjust the learning rate during the training
519
+ process of machine learning models, particularly neural networks. Adjusting the
520
+ learning rate can significantly impact training speed and performance. A high
521
+ rate might lead to overshooting the optimal solution, while a very low rate
522
+ can result in slow convergence or getting stuck in local minima. These methods
523
+ offer different strategies to balance exploration and exploitation during training.
524
+
525
+ ### Rate
526
+
527
+ Provides various methods for implementing learning rate schedules.
528
+
529
+ Learning rate schedules dynamically adjust the learning rate during the training
530
+ process of machine learning models, particularly neural networks. Adjusting the
531
+ learning rate can significantly impact training speed and performance. A high
532
+ rate might lead to overshooting the optimal solution, while a very low rate
533
+ can result in slow convergence or getting stuck in local minima. These methods
534
+ offer different strategies to balance exploration and exploitation during training.
535
+
536
+ ### default
537
+
538
+ #### cosineAnnealing
539
+
540
+ `(period: number, minRate: number) => (baseRate: number, iteration: number) => number`
541
+
542
+ Implements a Cosine Annealing learning rate schedule.
543
+
544
+ This schedule varies the learning rate cyclically according to a cosine function.
545
+ It starts at the `baseRate` and smoothly anneals down to `minRate` over a
546
+ specified `period` of iterations, then potentially repeats. This can help
547
+ the model escape local minima and explore the loss landscape more effectively.
548
+ Often used with "warm restarts" where the cycle repeats.
549
+
550
+ Formula: `learning_rate = minRate + 0.5 * (baseRate - minRate) * (1 + cos(pi * current_cycle_iteration / period))`
551
+
552
+ Parameters:
553
+ - `period` - The number of iterations over which the learning rate anneals from `baseRate` to `minRate` in one cycle. Defaults to 1000.
554
+ - `minRate` - The minimum learning rate value at the end of a cycle. Defaults to 0.
555
+ - `baseRate` - The initial (maximum) learning rate for the cycle.
556
+ - `iteration` - The current training iteration.
557
+
558
+ Returns: A function that calculates the learning rate for a given iteration based on the cosine annealing schedule.
559
+
560
+ #### cosineAnnealingWarmRestarts
561
+
562
+ `(initialPeriod: number, minRate: number, tMult: number) => (baseRate: number, iteration: number) => number`
563
+
564
+ Cosine Annealing with Warm Restarts (SGDR style) where the cycle length can grow by a multiplier (tMult) after each restart.
565
+
566
+ Parameters:
567
+ - `initialPeriod` - Length of the first cycle in iterations.
568
+ - `minRate` - Minimum learning rate at valley.
569
+ - `tMult` - Factor to multiply the period after each restart (>=1).
570
+
571
+ #### exp
572
+
573
+ `(gamma: number) => (baseRate: number, iteration: number) => number`
574
+
575
+ Implements an exponential decay learning rate schedule.
576
+
577
+ The learning rate decreases exponentially after each iteration, multiplying
578
+ by the decay factor `gamma`. This provides a smooth, continuous reduction
579
+ in the learning rate over time.
580
+
581
+ Formula: `learning_rate = baseRate * gamma ^ iteration`
582
+
583
+ Parameters:
584
+ - `gamma` - The decay factor applied at each iteration. Should be less than 1. Defaults to 0.999.
585
+ - `baseRate` - The initial learning rate.
586
+ - `iteration` - The current training iteration.
587
+
588
+ Returns: A function that calculates the exponentially decayed learning rate for a given iteration.
589
+
590
+ #### fixed
591
+
592
+ `() => (baseRate: number, iteration: number) => number`
593
+
594
+ Implements a fixed learning rate schedule.
595
+
596
+ The learning rate remains constant throughout the entire training process.
597
+ This is the simplest schedule and serves as a baseline, but may not be
598
+ optimal for complex problems.
599
+
600
+ Parameters:
601
+ - `baseRate` - The initial learning rate, which will remain constant.
602
+ - `iteration` - The current training iteration (unused in this method, but included for consistency).
603
+
604
+ Returns: A function that takes the base learning rate and the current iteration number, and always returns the base learning rate.
605
+
606
+ #### inv
607
+
608
+ `(gamma: number, power: number) => (baseRate: number, iteration: number) => number`
609
+
610
+ Implements an inverse decay learning rate schedule.
611
+
612
+ The learning rate decreases as the inverse of the iteration number,
613
+ controlled by the decay factor `gamma` and exponent `power`. The rate
614
+ decreases more slowly over time compared to exponential decay.
615
+
616
+ Formula: `learning_rate = baseRate / (1 + gamma * Math.pow(iteration, power))`
617
+
618
+ Parameters:
619
+ - `gamma` - Controls the rate of decay. Higher values lead to faster decay. Defaults to 0.001.
620
+ - `power` - The exponent controlling the shape of the decay curve. Defaults to 2.
621
+ - `baseRate` - The initial learning rate.
622
+ - `iteration` - The current training iteration.
623
+
624
+ Returns: A function that calculates the inversely decayed learning rate for a given iteration.
625
+
626
+ #### linearWarmupDecay
627
+
628
+ `(totalSteps: number, warmupSteps: number | undefined, endRate: number) => (baseRate: number, iteration: number) => number`
629
+
630
+ Linear Warmup followed by Linear Decay to an end rate.
631
+ Warmup linearly increases LR from near 0 up to baseRate over warmupSteps, then linearly decays to endRate at totalSteps.
632
+ Iterations beyond totalSteps clamp to endRate.
633
+
634
+ Parameters:
635
+ - `totalSteps` - Total steps for full schedule (must be > 0).
636
+ - `warmupSteps` - Steps for warmup (< totalSteps). Defaults to 10% of totalSteps.
637
+ - `endRate` - Final rate at totalSteps.
638
+
639
+ #### reduceOnPlateau
640
+
641
+ `(options: { factor?: number | undefined; patience?: number | undefined; minDelta?: number | undefined; cooldown?: number | undefined; minRate?: number | undefined; verbose?: boolean | undefined; } | undefined) => (baseRate: number, iteration: number, lastError?: number | undefined) => number`
642
+
643
+ ReduceLROnPlateau style scheduler (stateful closure) that monitors error signal (third argument if provided)
644
+ and reduces rate by 'factor' if no improvement beyond 'minDelta' for 'patience' iterations.
645
+ Cooldown prevents immediate successive reductions.
646
+ NOTE: Requires the training loop to call with signature (baseRate, iteration, lastError).
647
+
648
+ #### step
649
+
650
+ `(gamma: number, stepSize: number) => (baseRate: number, iteration: number) => number`
651
+
652
+ Implements a step decay learning rate schedule.
653
+
654
+ The learning rate is reduced by a multiplicative factor (`gamma`)
655
+ at predefined intervals (`stepSize` iterations). This allows for
656
+ faster initial learning, followed by finer adjustments as training progresses.
657
+
658
+ Formula: `learning_rate = baseRate * gamma ^ floor(iteration / stepSize)`
659
+
660
+ Parameters:
661
+ - `gamma` - The factor by which the learning rate is multiplied at each step. Should be less than 1. Defaults to 0.9.
662
+ - `stepSize` - The number of iterations after which the learning rate decays. Defaults to 100.
663
+ - `baseRate` - The initial learning rate.
664
+ - `iteration` - The current training iteration.
665
+
666
+ Returns: A function that calculates the decayed learning rate for a given iteration.
667
+
668
+ ## methods/selection.ts
669
+
670
+ ### selection