@reicek/neataptic-ts 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/ISSUE_TEMPLATE/bug_report.md +33 -0
- package/.github/ISSUE_TEMPLATE/feature_request.md +27 -0
- package/.github/PULL_REQUEST_TEMPLATE.md +28 -0
- package/.github/workflows/ci.yml +41 -0
- package/.github/workflows/deploy-pages.yml +29 -0
- package/.github/workflows/manual_release_pipeline.yml +62 -0
- package/.github/workflows/publish.yml +85 -0
- package/.github/workflows/release_dispatch.yml +38 -0
- package/.travis.yml +5 -0
- package/CONTRIBUTING.md +92 -0
- package/LICENSE +24 -0
- package/ONNX_EXPORT.md +87 -0
- package/README.md +1173 -0
- package/RELEASE.md +54 -0
- package/dist-docs/package.json +1 -0
- package/dist-docs/scripts/generate-docs.d.ts +2 -0
- package/dist-docs/scripts/generate-docs.d.ts.map +1 -0
- package/dist-docs/scripts/generate-docs.js +536 -0
- package/dist-docs/scripts/generate-docs.js.map +1 -0
- package/dist-docs/scripts/render-docs-html.d.ts +2 -0
- package/dist-docs/scripts/render-docs-html.d.ts.map +1 -0
- package/dist-docs/scripts/render-docs-html.js +148 -0
- package/dist-docs/scripts/render-docs-html.js.map +1 -0
- package/docs/FOLDERS.md +14 -0
- package/docs/README.md +1173 -0
- package/docs/architecture/README.md +1391 -0
- package/docs/architecture/index.html +938 -0
- package/docs/architecture/network/README.md +1210 -0
- package/docs/architecture/network/index.html +908 -0
- package/docs/assets/ascii-maze.bundle.js +16542 -0
- package/docs/assets/ascii-maze.bundle.js.map +7 -0
- package/docs/index.html +1419 -0
- package/docs/methods/README.md +670 -0
- package/docs/methods/index.html +477 -0
- package/docs/multithreading/README.md +274 -0
- package/docs/multithreading/index.html +215 -0
- package/docs/multithreading/workers/README.md +23 -0
- package/docs/multithreading/workers/browser/README.md +39 -0
- package/docs/multithreading/workers/browser/index.html +70 -0
- package/docs/multithreading/workers/index.html +57 -0
- package/docs/multithreading/workers/node/README.md +33 -0
- package/docs/multithreading/workers/node/index.html +66 -0
- package/docs/neat/README.md +1284 -0
- package/docs/neat/index.html +906 -0
- package/docs/src/README.md +2659 -0
- package/docs/src/index.html +1579 -0
- package/jest.config.ts +32 -0
- package/package.json +99 -0
- package/plans/HyperMorphoNEAT.md +293 -0
- package/plans/ONNX_EXPORT_PLAN.md +46 -0
- package/scripts/generate-docs.ts +486 -0
- package/scripts/render-docs-html.ts +138 -0
- package/scripts/types.d.ts +2 -0
- package/src/README.md +2659 -0
- package/src/architecture/README.md +1391 -0
- package/src/architecture/activationArrayPool.ts +135 -0
- package/src/architecture/architect.ts +635 -0
- package/src/architecture/connection.ts +148 -0
- package/src/architecture/group.ts +406 -0
- package/src/architecture/layer.ts +804 -0
- package/src/architecture/network/README.md +1210 -0
- package/src/architecture/network/network.activate.ts +223 -0
- package/src/architecture/network/network.connect.ts +157 -0
- package/src/architecture/network/network.deterministic.ts +167 -0
- package/src/architecture/network/network.evolve.ts +426 -0
- package/src/architecture/network/network.gating.ts +186 -0
- package/src/architecture/network/network.genetic.ts +247 -0
- package/src/architecture/network/network.mutate.ts +624 -0
- package/src/architecture/network/network.onnx.ts +463 -0
- package/src/architecture/network/network.prune.ts +216 -0
- package/src/architecture/network/network.remove.ts +96 -0
- package/src/architecture/network/network.serialize.ts +309 -0
- package/src/architecture/network/network.slab.ts +262 -0
- package/src/architecture/network/network.standalone.ts +246 -0
- package/src/architecture/network/network.stats.ts +59 -0
- package/src/architecture/network/network.topology.ts +86 -0
- package/src/architecture/network/network.training.ts +1278 -0
- package/src/architecture/network.ts +1302 -0
- package/src/architecture/node.ts +1288 -0
- package/src/architecture/onnx.ts +3 -0
- package/src/config.ts +83 -0
- package/src/methods/README.md +670 -0
- package/src/methods/activation.ts +372 -0
- package/src/methods/connection.ts +31 -0
- package/src/methods/cost.ts +347 -0
- package/src/methods/crossover.ts +63 -0
- package/src/methods/gating.ts +43 -0
- package/src/methods/methods.ts +8 -0
- package/src/methods/mutation.ts +300 -0
- package/src/methods/rate.ts +257 -0
- package/src/methods/selection.ts +65 -0
- package/src/multithreading/README.md +274 -0
- package/src/multithreading/multi.ts +339 -0
- package/src/multithreading/workers/README.md +23 -0
- package/src/multithreading/workers/browser/README.md +39 -0
- package/src/multithreading/workers/browser/testworker.ts +99 -0
- package/src/multithreading/workers/node/README.md +33 -0
- package/src/multithreading/workers/node/testworker.ts +72 -0
- package/src/multithreading/workers/node/worker.ts +70 -0
- package/src/multithreading/workers/workers.ts +22 -0
- package/src/neat/README.md +1284 -0
- package/src/neat/neat.adaptive.ts +544 -0
- package/src/neat/neat.compat.ts +164 -0
- package/src/neat/neat.constants.ts +20 -0
- package/src/neat/neat.diversity.ts +217 -0
- package/src/neat/neat.evaluate.ts +328 -0
- package/src/neat/neat.evolve.ts +1026 -0
- package/src/neat/neat.export.ts +249 -0
- package/src/neat/neat.helpers.ts +235 -0
- package/src/neat/neat.lineage.ts +220 -0
- package/src/neat/neat.multiobjective.ts +260 -0
- package/src/neat/neat.mutation.ts +718 -0
- package/src/neat/neat.objectives.ts +157 -0
- package/src/neat/neat.pruning.ts +190 -0
- package/src/neat/neat.selection.ts +269 -0
- package/src/neat/neat.speciation.ts +460 -0
- package/src/neat/neat.species.ts +151 -0
- package/src/neat/neat.telemetry.exports.ts +469 -0
- package/src/neat/neat.telemetry.ts +933 -0
- package/src/neat/neat.types.ts +275 -0
- package/src/neat.ts +1042 -0
- package/src/neataptic.ts +10 -0
- package/test/architecture/activationArrayPool.capacity.test.ts +19 -0
- package/test/architecture/activationArrayPool.test.ts +46 -0
- package/test/architecture/connection.test.ts +290 -0
- package/test/architecture/group.test.ts +950 -0
- package/test/architecture/layer.test.ts +1535 -0
- package/test/architecture/network.pruning.test.ts +65 -0
- package/test/architecture/node.test.ts +1602 -0
- package/test/examples/asciiMaze/asciiMaze.e2e.test.ts +499 -0
- package/test/examples/asciiMaze/asciiMaze.ts +41 -0
- package/test/examples/asciiMaze/browser-entry.ts +164 -0
- package/test/examples/asciiMaze/browserLogger.ts +221 -0
- package/test/examples/asciiMaze/browserTerminalUtility.ts +48 -0
- package/test/examples/asciiMaze/colors.ts +119 -0
- package/test/examples/asciiMaze/dashboardManager.ts +968 -0
- package/test/examples/asciiMaze/evolutionEngine.ts +1248 -0
- package/test/examples/asciiMaze/fitness.ts +136 -0
- package/test/examples/asciiMaze/index.html +128 -0
- package/test/examples/asciiMaze/index.ts +26 -0
- package/test/examples/asciiMaze/interfaces.ts +235 -0
- package/test/examples/asciiMaze/mazeMovement.ts +996 -0
- package/test/examples/asciiMaze/mazeUtils.ts +278 -0
- package/test/examples/asciiMaze/mazeVision.ts +402 -0
- package/test/examples/asciiMaze/mazeVisualization.ts +585 -0
- package/test/examples/asciiMaze/mazes.ts +245 -0
- package/test/examples/asciiMaze/networkRefinement.ts +76 -0
- package/test/examples/asciiMaze/networkVisualization.ts +901 -0
- package/test/examples/asciiMaze/terminalUtility.ts +73 -0
- package/test/methods/activation.test.ts +1142 -0
- package/test/methods/connection.test.ts +146 -0
- package/test/methods/cost.test.ts +1123 -0
- package/test/methods/crossover.test.ts +202 -0
- package/test/methods/gating.test.ts +144 -0
- package/test/methods/mutation.test.ts +451 -0
- package/test/methods/optimizers.advanced.test.ts +80 -0
- package/test/methods/optimizers.behavior.test.ts +105 -0
- package/test/methods/optimizers.formula.test.ts +89 -0
- package/test/methods/rate.cosineWarmRestarts.test.ts +44 -0
- package/test/methods/rate.linearWarmupDecay.test.ts +41 -0
- package/test/methods/rate.reduceOnPlateau.test.ts +45 -0
- package/test/methods/rate.test.ts +684 -0
- package/test/methods/selection.test.ts +245 -0
- package/test/multithreading/activations.functions.test.ts +54 -0
- package/test/multithreading/multi.test.ts +290 -0
- package/test/multithreading/worker.node.process.test.ts +39 -0
- package/test/multithreading/workers.coverage.test.ts +36 -0
- package/test/multithreading/workers.dynamic.import.test.ts +8 -0
- package/test/neat/neat.adaptive.complexityBudget.test.ts +34 -0
- package/test/neat/neat.adaptive.criterion.complexity.test.ts +50 -0
- package/test/neat/neat.adaptive.mutation.strategy.test.ts +37 -0
- package/test/neat/neat.adaptive.operator.decay.test.ts +31 -0
- package/test/neat/neat.adaptive.phasedComplexity.test.ts +25 -0
- package/test/neat/neat.adaptive.pruning.test.ts +25 -0
- package/test/neat/neat.adaptive.targetSpecies.test.ts +43 -0
- package/test/neat/neat.additional.coverage.test.ts +126 -0
- package/test/neat/neat.advanced.enhancements.test.ts +85 -0
- package/test/neat/neat.advanced.test.ts +589 -0
- package/test/neat/neat.diversity.autocompat.test.ts +47 -0
- package/test/neat/neat.diversity.metrics.test.ts +21 -0
- package/test/neat/neat.diversity.stats.test.ts +44 -0
- package/test/neat/neat.enhancements.test.ts +79 -0
- package/test/neat/neat.entropy.ancestorAdaptive.test.ts +133 -0
- package/test/neat/neat.entropy.compat.csv.test.ts +108 -0
- package/test/neat/neat.evolution.pruning.test.ts +39 -0
- package/test/neat/neat.fastmode.autotune.test.ts +42 -0
- package/test/neat/neat.innovation.test.ts +134 -0
- package/test/neat/neat.lineage.antibreeding.test.ts +35 -0
- package/test/neat/neat.lineage.entropy.test.ts +56 -0
- package/test/neat/neat.lineage.inbreeding.test.ts +49 -0
- package/test/neat/neat.lineage.pressure.test.ts +29 -0
- package/test/neat/neat.multiobjective.adaptive.test.ts +57 -0
- package/test/neat/neat.multiobjective.dynamic.schedule.test.ts +46 -0
- package/test/neat/neat.multiobjective.dynamic.test.ts +31 -0
- package/test/neat/neat.multiobjective.fastsort.delegation.test.ts +51 -0
- package/test/neat/neat.multiobjective.prune.test.ts +39 -0
- package/test/neat/neat.multiobjective.test.ts +21 -0
- package/test/neat/neat.mutation.undefined.pool.test.ts +24 -0
- package/test/neat/neat.objective.events.test.ts +26 -0
- package/test/neat/neat.objective.importance.test.ts +21 -0
- package/test/neat/neat.objective.lifetimes.test.ts +33 -0
- package/test/neat/neat.offspring.allocation.test.ts +22 -0
- package/test/neat/neat.operator.bandit.test.ts +17 -0
- package/test/neat/neat.operator.phases.test.ts +38 -0
- package/test/neat/neat.pruneInactive.behavior.test.ts +54 -0
- package/test/neat/neat.reenable.adaptation.test.ts +18 -0
- package/test/neat/neat.rng.state.test.ts +22 -0
- package/test/neat/neat.spawn.add.test.ts +123 -0
- package/test/neat/neat.speciation.test.ts +96 -0
- package/test/neat/neat.species.allocation.telemetry.test.ts +26 -0
- package/test/neat/neat.species.history.csv.test.ts +24 -0
- package/test/neat/neat.telemetry.advanced.test.ts +226 -0
- package/test/neat/neat.telemetry.csv.lineage.test.ts +19 -0
- package/test/neat/neat.telemetry.parity.test.ts +42 -0
- package/test/neat/neat.telemetry.stream.test.ts +19 -0
- package/test/neat/neat.telemetry.test.ts +16 -0
- package/test/neat/neat.test.ts +422 -0
- package/test/neat/neat.utilities.test.ts +44 -0
- package/test/network/__suppress_console.ts +9 -0
- package/test/network/acyclic.topoorder.test.ts +17 -0
- package/test/network/checkpoint.metricshook.test.ts +36 -0
- package/test/network/error.handling.test.ts +581 -0
- package/test/network/evolution.test.ts +285 -0
- package/test/network/genetic.test.ts +208 -0
- package/test/network/learning.capability.test.ts +244 -0
- package/test/network/mutation.effects.test.ts +492 -0
- package/test/network/network.activate.test.ts +115 -0
- package/test/network/network.activateBatch.test.ts +30 -0
- package/test/network/network.deterministic.test.ts +64 -0
- package/test/network/network.evolve.branches.test.ts +75 -0
- package/test/network/network.evolve.multithread.branches.test.ts +83 -0
- package/test/network/network.evolve.test.ts +100 -0
- package/test/network/network.gating.removal.test.ts +93 -0
- package/test/network/network.mutate.additional.test.ts +145 -0
- package/test/network/network.mutate.edgecases.test.ts +101 -0
- package/test/network/network.mutate.test.ts +101 -0
- package/test/network/network.prune.earlyexit.test.ts +38 -0
- package/test/network/network.remove.errors.test.ts +45 -0
- package/test/network/network.slab.fallbacks.test.ts +22 -0
- package/test/network/network.stats.test.ts +45 -0
- package/test/network/network.training.advanced.test.ts +149 -0
- package/test/network/network.training.basic.test.ts +228 -0
- package/test/network/network.training.helpers.test.ts +183 -0
- package/test/network/onnx.export.test.ts +310 -0
- package/test/network/onnx.import.test.ts +129 -0
- package/test/network/pruning.topology.test.ts +282 -0
- package/test/network/regularization.determinism.test.ts +83 -0
- package/test/network/regularization.dropconnect.test.ts +17 -0
- package/test/network/regularization.dropconnect.validation.test.ts +18 -0
- package/test/network/regularization.stochasticdepth.test.ts +27 -0
- package/test/network/regularization.test.ts +843 -0
- package/test/network/regularization.weightnoise.test.ts +30 -0
- package/test/network/setupTests.ts +2 -0
- package/test/network/standalone.test.ts +332 -0
- package/test/network/structure.serialization.test.ts +660 -0
- package/test/training/training.determinism.mixed-precision.test.ts +134 -0
- package/test/training/training.earlystopping.test.ts +91 -0
- package/test/training/training.edge-cases.test.ts +91 -0
- package/test/training/training.extensions.test.ts +47 -0
- package/test/training/training.gradient.features.test.ts +110 -0
- package/test/training/training.gradient.refinements.test.ts +170 -0
- package/test/training/training.gradient.separate-bias.test.ts +41 -0
- package/test/training/training.optimizer.test.ts +48 -0
- package/test/training/training.plateau.smoothing.test.ts +58 -0
- package/test/training/training.smoothing.types.test.ts +174 -0
- package/test/training/training.train.options.coverage.test.ts +52 -0
- package/test/utils/console-helper.ts +76 -0
- package/test/utils/jest-setup.ts +60 -0
- package/test/utils/test-helpers.ts +175 -0
- package/tsconfig.docs.json +12 -0
- package/tsconfig.json +21 -0
- package/webpack.config.js +49 -0
|
@@ -0,0 +1,908 @@
|
|
|
1
|
+
<!DOCTYPE html><html><head><meta charset="utf-8"><title>architecture/network</title>
|
|
2
|
+
<meta name="viewport" content="width=device-width,initial-scale=1">
|
|
3
|
+
<style>
|
|
4
|
+
body{font-family:system-ui,-apple-system,Segoe UI,Arial,sans-serif;margin:0 auto;padding:0 20px 60px;line-height:1.55;background:#fff;color:#222;display:grid;grid-template-columns:260px 1fr 280px;grid-gap:32px;}
|
|
5
|
+
nav.site{position:sticky;top:0;align-self:start;max-height:100vh;overflow:auto;padding:24px 0 40px;}
|
|
6
|
+
nav.site h1{font-size:1.05rem;margin:0 0 .75rem;font-weight:600;}
|
|
7
|
+
.doc-nav{list-style:none;margin:0;padding:0;font-size:.85rem;}
|
|
8
|
+
.doc-nav li{margin:2px 0;}
|
|
9
|
+
.doc-nav a{display:block;padding:4px 8px;border-radius:4px;color:#2c3963;text-decoration:none;}
|
|
10
|
+
.doc-nav li.current>a{background:#2c3963;color:#fff;font-weight:600;}
|
|
11
|
+
.doc-nav a:hover{background:#e4e8f3;}
|
|
12
|
+
main{padding:40px 0;}
|
|
13
|
+
aside.page-index{position:sticky;top:0;align-self:start;max-height:100vh;overflow:auto;padding:32px 0 40px;font-size:.85rem;}
|
|
14
|
+
aside.page-index h2{font-size:.9rem;margin:0 0 .75rem;text-transform:uppercase;letter-spacing:.5px;color:#444;}
|
|
15
|
+
aside.page-index .toc-file{margin:0 0 .5rem;}
|
|
16
|
+
aside.page-index ul{list-style:none;margin:.25rem 0 .5rem .25rem;padding:0;}
|
|
17
|
+
aside.page-index li{margin:0;}
|
|
18
|
+
aside.page-index a{color:#444;text-decoration:none;}
|
|
19
|
+
aside.page-index a:hover{color:#2c3963;}
|
|
20
|
+
pre{background:#1e1e1e;color:#eee;padding:12px;border-radius:6px;overflow:auto;}
|
|
21
|
+
code{background:#f5f5f5;padding:2px 4px;border-radius:4px;font-size:90%;}
|
|
22
|
+
pre code{background:transparent;padding:0;font-size:90%;}
|
|
23
|
+
a{color:#2c3963;text-decoration:none;}a:hover{text-decoration:underline;}
|
|
24
|
+
h1,h2,h3,h4{scroll-margin-top:70px;}
|
|
25
|
+
blockquote{border-left:4px solid #ddd;margin:1em 0;padding:.5em 1em;color:#555;}
|
|
26
|
+
table{border-collapse:collapse}th,td{border:1px solid #ccc;padding:4px 8px;text-align:left;}
|
|
27
|
+
footer{margin-top:64px;font-size:.75rem;color:#666;}
|
|
28
|
+
@media (max-width:1100px){body{grid-template-columns:220px 1fr;}aside.page-index{display:none;} }
|
|
29
|
+
@media (max-width:800px){body{grid-template-columns:1fr;}nav.site{position:relative;top:auto;max-height:none;order:2;}main{order:1;padding-top:24px;}}
|
|
30
|
+
</style></head><body>
|
|
31
|
+
<nav class="site">
|
|
32
|
+
<h1>Docs Index</h1>
|
|
33
|
+
<ul class="doc-nav"><li><a href="../../index.html">root</a></li>
|
|
34
|
+
<li><a href="../index.html">architecture/</a></li>
|
|
35
|
+
<li class="current"><a href="./index.html">architecture/network/</a></li>
|
|
36
|
+
<li><a href="../../methods/index.html">methods/</a></li>
|
|
37
|
+
<li><a href="../../multithreading/index.html">multithreading/</a></li>
|
|
38
|
+
<li><a href="../../multithreading/workers/index.html">multithreading/workers/</a></li>
|
|
39
|
+
<li><a href="../../multithreading/workers/browser/index.html">multithreading/workers/browser/</a></li>
|
|
40
|
+
<li><a href="../../multithreading/workers/node/index.html">multithreading/workers/node/</a></li>
|
|
41
|
+
<li><a href="../../neat/index.html">neat/</a></li>
|
|
42
|
+
<li><a href="../../src/index.html">src/</a></li>
|
|
43
|
+
<li><a href="../../../test/examples/asciiMaze/index.html">examples/asciiMaze/</a></li></ul>
|
|
44
|
+
</nav>
|
|
45
|
+
<main>
|
|
46
|
+
<h1 id="architecture-network">architecture/network</h1><h2 id="architecture-network-network-activate-ts">architecture/network/network.activate.ts</h2><h3 id="activatebatch">activateBatch</h3><p><code>(inputs: number[][], training: boolean) => number[][]</code></p>
|
|
47
|
+
<p>Activate the network over a mini‑batch (array) of input vectors, returning a 2‑D array of outputs.</p>
|
|
48
|
+
<p>This helper simply loops, invoking {@link Network.activate} (or its bound variant) for each
|
|
49
|
+
sample. It is intentionally naive: no attempt is made to fuse operations across the batch.
|
|
50
|
+
For very large batch sizes or performance‑critical paths consider implementing a custom
|
|
51
|
+
vectorized backend that exploits SIMD, GPU kernels, or parallel workers.</p>
|
|
52
|
+
<p>Input validation occurs per row to surface the earliest mismatch with a descriptive index.</p>
|
|
53
|
+
<p>Parameters:</p>
|
|
54
|
+
<ul>
|
|
55
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
56
|
+
</ul>
|
|
57
|
+
<ul>
|
|
58
|
+
<li></li>
|
|
59
|
+
</ul>
|
|
60
|
+
<ul>
|
|
61
|
+
<li><code>inputs</code> - Array of input vectors; each must have length == network.input.</li>
|
|
62
|
+
<li><code>training</code> - Whether each activation should keep training traces.</li>
|
|
63
|
+
</ul>
|
|
64
|
+
<p>Returns: 2‑D array: outputs[i] is the activation result for inputs[i].</p>
|
|
65
|
+
<h3 id="activateraw">activateRaw</h3><p><code>(input: number[], training: boolean, maxActivationDepth: number) => any</code></p>
|
|
66
|
+
<p>Thin semantic alias to the network's main activation path.</p>
|
|
67
|
+
<p>At present this simply forwards to {@link Network.activate}. The indirection is useful for:</p>
|
|
68
|
+
<ul>
|
|
69
|
+
<li>Future differentiation between raw (immediate) activation and a mode that performs reuse /
|
|
70
|
+
staged batching logic.</li>
|
|
71
|
+
<li>Providing a stable exported symbol for external tooling / instrumentation.</li>
|
|
72
|
+
</ul>
|
|
73
|
+
<p>Parameters:</p>
|
|
74
|
+
<ul>
|
|
75
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
76
|
+
</ul>
|
|
77
|
+
<ul>
|
|
78
|
+
<li></li>
|
|
79
|
+
</ul>
|
|
80
|
+
<ul>
|
|
81
|
+
<li><code>input</code> - Input vector (length == network.input).</li>
|
|
82
|
+
<li><code>training</code> - Whether to retain training traces / gradients (delegated downstream).</li>
|
|
83
|
+
<li><code>maxActivationDepth</code> - Guard against runaway recursion / cyclic activation attempts.</li>
|
|
84
|
+
</ul>
|
|
85
|
+
<p>Returns: Implementation-defined result of Network.activate (typically an output vector).</p>
|
|
86
|
+
<h3 id="notraceactivate">noTraceActivate</h3><p><code>(input: number[]) => number[]</code></p>
|
|
87
|
+
<p>Network activation helpers (forward pass utilities).</p>
|
|
88
|
+
<p>This module provides progressively lower–overhead entry points for performing
|
|
89
|
+
forward propagation through a {@link Network}. The emphasis is on:</p>
|
|
90
|
+
<ol>
|
|
91
|
+
<li>Educative clarity – each step is documented so newcomers can follow the
|
|
92
|
+
life‑cycle of a forward pass in a neural network graph.</li>
|
|
93
|
+
<li>Performance – fast paths avoid unnecessary allocation and bookkeeping when
|
|
94
|
+
gradients / evolution traces are not needed.</li>
|
|
95
|
+
<li>Safety – pooled buffers are never exposed directly to the public API.</li>
|
|
96
|
+
</ol>
|
|
97
|
+
<p>Exported functions:</p>
|
|
98
|
+
<ul>
|
|
99
|
+
<li>{@link noTraceActivate}: ultra‑light inference (no gradients, minimal allocation).</li>
|
|
100
|
+
<li>{@link activateRaw}: thin semantic alias around the canonical Network.activate path.</li>
|
|
101
|
+
<li>{@link activateBatch}: simple mini‑batch loop utility.</li>
|
|
102
|
+
</ul>
|
|
103
|
+
<p>Design terminology used below:</p>
|
|
104
|
+
<ul>
|
|
105
|
+
<li>Topological order: a sequence of nodes such that all directed connections flow forward.</li>
|
|
106
|
+
<li>Slab: a contiguous typed‑array structure packing node activations for vectorized math.</li>
|
|
107
|
+
<li>Trace / gradient bookkeeping: auxiliary data (e.g. eligibility traces, derivative caches)
|
|
108
|
+
required for training algorithms; skipped in inference‑only modes.</li>
|
|
109
|
+
<li>Pool: an object managing reusable arrays to reduce garbage collection pressure.</li>
|
|
110
|
+
</ul>
|
|
111
|
+
<h2 id="architecture-network-network-connect-ts">architecture/network/network.connect.ts</h2><h3 id="connect">connect</h3><p><code>(from: import("src/architecture/node").default, to: import("src/architecture/node").default, weight: number | undefined) => import("src/architecture/connection").default[]</code></p>
|
|
112
|
+
<p>Network structural mutation helpers (connect / disconnect).</p>
|
|
113
|
+
<p>This module centralizes the logic for adding and removing edges (connections) between
|
|
114
|
+
nodes in a {@link Network}. By isolating the book‑keeping here we keep the primary
|
|
115
|
+
Network class lean and ensure consistent handling of:</p>
|
|
116
|
+
<ul>
|
|
117
|
+
<li>Acyclic constraints</li>
|
|
118
|
+
<li>Multiple low‑level connections returned by composite node operations</li>
|
|
119
|
+
<li>Gating & self‑connection invariants</li>
|
|
120
|
+
<li>Cache invalidation (topological order + packed activation slabs)</li>
|
|
121
|
+
</ul>
|
|
122
|
+
<p>Exported functions:</p>
|
|
123
|
+
<ul>
|
|
124
|
+
<li>{@link connect}: Create one or more connections from a source node to a target node.</li>
|
|
125
|
+
<li>{@link disconnect}: Remove (at most) one direct connection from source to target.</li>
|
|
126
|
+
</ul>
|
|
127
|
+
<p>Key terminology:</p>
|
|
128
|
+
<ul>
|
|
129
|
+
<li>Self‑connection: An edge where from === to (loop). Usually disallowed under acyclicity.</li>
|
|
130
|
+
<li>Gating: A mechanism where a third node modulates (gates) the weight / influence of a connection.</li>
|
|
131
|
+
<li>Slab: Packed typed‑array representation of connections for vectorized forward passes.</li>
|
|
132
|
+
</ul>
|
|
133
|
+
<h3 id="disconnect">disconnect</h3><p><code>(from: import("src/architecture/node").default, to: import("src/architecture/node").default) => void</code></p>
|
|
134
|
+
<p>Remove (at most) one directed connection from source 'from' to target 'to'.</p>
|
|
135
|
+
<p>Only a single direct edge is removed because typical graph configurations maintain at most
|
|
136
|
+
one logical connection between a given pair of nodes (excluding potential future multi‑edge
|
|
137
|
+
semantics). If the target edge is gated we first call {@link Network.ungate} to maintain
|
|
138
|
+
gating invariants (ensuring the gater node's internal gate list remains consistent).</p>
|
|
139
|
+
<p>Algorithm outline:</p>
|
|
140
|
+
<ol>
|
|
141
|
+
<li>Choose the correct list (selfconns vs connections) based on whether from === to.</li>
|
|
142
|
+
<li>Linear scan to find the first edge with matching endpoints.</li>
|
|
143
|
+
<li>If gated, ungate to detach gater bookkeeping.</li>
|
|
144
|
+
<li>Splice the edge out; exit loop (only one expected).</li>
|
|
145
|
+
<li>Delegate per‑node cleanup via from.disconnect(to) (clears reverse references, traces, etc.).</li>
|
|
146
|
+
<li>Mark structural caches dirty for lazy recomputation.</li>
|
|
147
|
+
</ol>
|
|
148
|
+
<p>Complexity:</p>
|
|
149
|
+
<ul>
|
|
150
|
+
<li>Time: O(m) where m is length of the searched list (connections or selfconns).</li>
|
|
151
|
+
<li>Space: O(1) extra.</li>
|
|
152
|
+
</ul>
|
|
153
|
+
<p>Idempotence: If no such edge exists we still perform node-level disconnect and flag caches dirty –
|
|
154
|
+
this conservative approach simplifies callers (they need not pre‑check existence).</p>
|
|
155
|
+
<p>Parameters:</p>
|
|
156
|
+
<ul>
|
|
157
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
158
|
+
</ul>
|
|
159
|
+
<ul>
|
|
160
|
+
<li></li>
|
|
161
|
+
</ul>
|
|
162
|
+
<ul>
|
|
163
|
+
<li><code>from</code> - Source node.</li>
|
|
164
|
+
<li><code>to</code> - Target node.</li>
|
|
165
|
+
</ul>
|
|
166
|
+
<h2 id="architecture-network-network-deterministic-ts">architecture/network/network.deterministic.ts</h2><h3 id="getrandomfn">getRandomFn</h3><p><code>() => (() => number) | undefined</code></p>
|
|
167
|
+
<p>Retrieve the active random function reference (for testing, instrumentation, or swapping).</p>
|
|
168
|
+
<p>Mutating the returned function's closure variables (if any) is not recommended; prefer using
|
|
169
|
+
higher-level APIs (setSeed / restoreRNG) to manage state.</p>
|
|
170
|
+
<p>Parameters:</p>
|
|
171
|
+
<ul>
|
|
172
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
173
|
+
</ul>
|
|
174
|
+
<ul>
|
|
175
|
+
<li></li>
|
|
176
|
+
</ul>
|
|
177
|
+
<p>Returns: Function producing numbers in [0,1). May be undefined if never seeded (call setSeed first).</p>
|
|
178
|
+
<h3 id="getrngstate">getRNGState</h3><p><code>() => number | undefined</code></p>
|
|
179
|
+
<p>Get the current internal 32‑bit RNG state value.</p>
|
|
180
|
+
<p>Parameters:</p>
|
|
181
|
+
<ul>
|
|
182
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
183
|
+
</ul>
|
|
184
|
+
<ul>
|
|
185
|
+
<li></li>
|
|
186
|
+
</ul>
|
|
187
|
+
<p>Returns: Unsigned 32‑bit state integer or undefined if generator not yet seeded or was reset.</p>
|
|
188
|
+
<h3 id="network-deterministic">network.deterministic</h3><p>Default export bundle for convenient named imports.</p>
|
|
189
|
+
<h3 id="restorerng">restoreRNG</h3><p><code>(fn: () => number) => void</code></p>
|
|
190
|
+
<p>Restore a previously captured RNG function implementation (advanced usage).</p>
|
|
191
|
+
<p>This does NOT rehydrate _rngState (it explicitly sets it to undefined). Intended for scenarios
|
|
192
|
+
where a caller has customly serialized a full RNG closure or wants to inject a deterministic stub.
|
|
193
|
+
If you only need to restore the raw state word produced by {@link snapshotRNG}, prefer
|
|
194
|
+
{@link setRNGState} instead.</p>
|
|
195
|
+
<p>Parameters:</p>
|
|
196
|
+
<ul>
|
|
197
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
198
|
+
</ul>
|
|
199
|
+
<ul>
|
|
200
|
+
<li></li>
|
|
201
|
+
</ul>
|
|
202
|
+
<ul>
|
|
203
|
+
<li><code>fn</code> - Function returning a pseudo‑random number in [0,1). Caller guarantees determinism if required.</li>
|
|
204
|
+
</ul>
|
|
205
|
+
<h3 id="rngsnapshot">RNGSnapshot</h3><p>Deterministic pseudo‑random number generation (PRNG) utilities for {@link Network}.</p>
|
|
206
|
+
<p>Why this module exists:</p>
|
|
207
|
+
<ul>
|
|
208
|
+
<li>Facilitates reproducible evolutionary runs / gradient training by allowing explicit seeding.</li>
|
|
209
|
+
<li>Centralizes RNG state management & snapshot/restore operations (useful for rollbacks or
|
|
210
|
+
deterministic tests around mutation sequences).</li>
|
|
211
|
+
<li>Keeps the core Network class focused by extracting ancillary RNG concerns.</li>
|
|
212
|
+
</ul>
|
|
213
|
+
<p>Implementation notes:</p>
|
|
214
|
+
<ul>
|
|
215
|
+
<li>Uses a small, fast 32‑bit xorshift / mix style generator (same semantics as the legacy inline version)
|
|
216
|
+
combining an additive Weyl sequence step plus a few avalanche-style integer mixes.</li>
|
|
217
|
+
<li>Not cryptographically secure. Do not use for security / fairness sensitive applications.</li>
|
|
218
|
+
<li>Produces floating point numbers in [0,1) with 2^32 (~4.29e9) discrete possible mantissa states.</li>
|
|
219
|
+
</ul>
|
|
220
|
+
<p>Public surface:</p>
|
|
221
|
+
<ul>
|
|
222
|
+
<li>{@link setSeed}: Initialize deterministic generator with a numeric seed.</li>
|
|
223
|
+
<li>{@link snapshotRNG}: Capture current training step + raw internal RNG state.</li>
|
|
224
|
+
<li>{@link restoreRNG}: Provide an externally saved RNG function (advanced) & clear stored state.</li>
|
|
225
|
+
<li>{@link getRNGState} / {@link setRNGState}: Low-level accessors for the internal 32‑bit state word.</li>
|
|
226
|
+
<li>{@link getRandomFn}: Retrieve the active random() function reference (primarily for tests / tooling).</li>
|
|
227
|
+
</ul>
|
|
228
|
+
<p>Design rationale:</p>
|
|
229
|
+
<ul>
|
|
230
|
+
<li>Storing both a state integer (_rngState) and a function (_rand) allows hot-swapping alternative
|
|
231
|
+
RNG implementations (e.g., for benchmarking or pluggable randomness strategies) without rewriting
|
|
232
|
+
callsites inside Network algorithms.</li>
|
|
233
|
+
</ul>
|
|
234
|
+
<h3 id="setrngstate">setRNGState</h3><p><code>(state: number) => void</code></p>
|
|
235
|
+
<p>Explicitly set (override) the internal 32‑bit RNG state without changing the generator function.</p>
|
|
236
|
+
<p>This is a low‑level operation; typical clients should call {@link setSeed}. Provided for advanced
|
|
237
|
+
replay functionality where the same PRNG algorithm is assumed but you want to resume exactly at a
|
|
238
|
+
known state word.</p>
|
|
239
|
+
<p>Parameters:</p>
|
|
240
|
+
<ul>
|
|
241
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
242
|
+
</ul>
|
|
243
|
+
<ul>
|
|
244
|
+
<li></li>
|
|
245
|
+
</ul>
|
|
246
|
+
<ul>
|
|
247
|
+
<li><code>state</code> - Any finite number (only low 32 bits used). Ignored if not numeric.</li>
|
|
248
|
+
</ul>
|
|
249
|
+
<h3 id="setseed">setSeed</h3><p><code>(seed: number) => void</code></p>
|
|
250
|
+
<p>Seed the internal PRNG and install a deterministic random() implementation on the Network instance.</p>
|
|
251
|
+
<p>Process:</p>
|
|
252
|
+
<ol>
|
|
253
|
+
<li>Coerce the provided seed to an unsigned 32‑bit integer (>>> 0) for predictable wraparound behavior.</li>
|
|
254
|
+
<li>Define an inline closure that advances an internal 32‑bit state using:
|
|
255
|
+
a. A Weyl increment (adding constant 0x6D2B79F5 each call) ensuring full-period traversal of
|
|
256
|
+
the 32‑bit space when combined with mixing.
|
|
257
|
+
b. Two rounds of xorshift / integer mixing (xor, shifts, multiplications) to decorrelate bits.
|
|
258
|
+
c. Normalization to [0,1) by dividing the final 32‑bit unsigned integer by 2^32.</li>
|
|
259
|
+
</ol>
|
|
260
|
+
<p>Bit-mixing explanation (rough intuition):</p>
|
|
261
|
+
<ul>
|
|
262
|
+
<li>XOR with shifted versions spreads high-order entropy to lower bits.</li>
|
|
263
|
+
<li>Multiplication (Math.imul) with carefully chosen odd constants introduces non-linear mixing.</li>
|
|
264
|
+
<li>The final right shift & xor avalanche aims to reduce sequential correlation.</li>
|
|
265
|
+
</ul>
|
|
266
|
+
<p>Parameters:</p>
|
|
267
|
+
<ul>
|
|
268
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
269
|
+
</ul>
|
|
270
|
+
<ul>
|
|
271
|
+
<li></li>
|
|
272
|
+
</ul>
|
|
273
|
+
<ul>
|
|
274
|
+
<li><code>seed</code> - Any finite number; only its lower 32 bits are used.</li>
|
|
275
|
+
</ul>
|
|
276
|
+
<h3 id="snapshotrng">snapshotRNG</h3><p><code>() => import("src/architecture/network/network.deterministic").RNGSnapshot</code></p>
|
|
277
|
+
<p>Capture a snapshot of the RNG state together with the network's training step.</p>
|
|
278
|
+
<p>Useful for implementing speculative evolutionary mutations where you may revert both the
|
|
279
|
+
structural change and the randomness timeline if accepting/rejecting a candidate.</p>
|
|
280
|
+
<p>Parameters:</p>
|
|
281
|
+
<ul>
|
|
282
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
283
|
+
</ul>
|
|
284
|
+
<ul>
|
|
285
|
+
<li></li>
|
|
286
|
+
</ul>
|
|
287
|
+
<p>Returns: Object containing current training step & 32‑bit RNG state (both possibly undefined if unseeded).</p>
|
|
288
|
+
<h2 id="architecture-network-network-evolve-ts">architecture/network/network.evolve.ts</h2><h3 id="buildmultithreadfitness">buildMultiThreadFitness</h3><p><code>(set: TrainingSample[], cost: any, amount: number, growth: number, threads: number, options: any) => Promise<{ fitnessFunction: (genome: import("src/architecture/network").default) => number; threads: number; } | { fitnessFunction: (population: import("src/architecture/network").default[]) => Promise<void>; threads: number; }></code></p>
|
|
289
|
+
<p>Build a multi-threaded (worker-based) population fitness evaluator if worker infrastructure is available.</p>
|
|
290
|
+
<p>Strategy:</p>
|
|
291
|
+
<ul>
|
|
292
|
+
<li>Attempt to dynamically obtain a Worker constructor (node or browser variant).</li>
|
|
293
|
+
<li>If not possible, gracefully fall back to single-thread evaluation.</li>
|
|
294
|
+
<li>Spawn N workers (threads) each capable of evaluating genomes by calling worker.evaluate(genome).</li>
|
|
295
|
+
<li>Provide a fitness function that takes the whole population and returns a Promise that resolves
|
|
296
|
+
when all queued genomes have been processed. Each genome's score is written in-place.</li>
|
|
297
|
+
</ul>
|
|
298
|
+
<p>Implementation details:</p>
|
|
299
|
+
<ul>
|
|
300
|
+
<li>Queue: simple FIFO (array shift) suffices because ordering is not critical.</li>
|
|
301
|
+
<li>Robustness: Each worker evaluation is wrapped with error handling to prevent a single failure
|
|
302
|
+
from stalling the batch; failed evaluations simply proceed to next genome.</li>
|
|
303
|
+
<li>Complexity penalty applied after raw result retrieval: genome.score = -result - penalty.</li>
|
|
304
|
+
</ul>
|
|
305
|
+
<p>Returned metadata sets options.fitnessPopulation=true so downstream NEAT logic treats the fitness
|
|
306
|
+
function as operating over the entire population at once (rather than per-genome).</p>
|
|
307
|
+
<p>Parameters:</p>
|
|
308
|
+
<ul>
|
|
309
|
+
<li><code>set</code> - Dataset.</li>
|
|
310
|
+
<li><code>cost</code> - Cost function.</li>
|
|
311
|
+
<li><code>amount</code> - Repetition count (unused directly here; assumed handled inside worker.evaluate result metric if needed).</li>
|
|
312
|
+
<li><code>growth</code> - Complexity penalty scalar.</li>
|
|
313
|
+
<li><code>threads</code> - Desired worker count.</li>
|
|
314
|
+
<li><code>options</code> - Evolution options object (mutated to add cleanup hooks & flags).</li>
|
|
315
|
+
</ul>
|
|
316
|
+
<p>Returns: Object with fitnessFunction (population evaluator) and resolved thread count.</p>
|
|
317
|
+
<h3 id="buildsinglethreadfitness">buildSingleThreadFitness</h3><p><code>(set: TrainingSample[], cost: any, amount: number, growth: number) => (genome: import("D:/code-practice/NeatapticTS/src/architecture/network").default) => number</code></p>
|
|
318
|
+
<p>Build a single-threaded fitness evaluation function (classic NEAT style) evaluating a genome
|
|
319
|
+
over the provided dataset and returning a scalar score where higher is better.</p>
|
|
320
|
+
<p>Fitness Definition:
|
|
321
|
+
fitness = -averageError - complexityPenalty
|
|
322
|
+
We accumulate negative error (so lower error => higher fitness) over <code>amount</code> independent
|
|
323
|
+
evaluations (amount>1 can smooth stochastic evaluation noise) then subtract complexity penalty.</p>
|
|
324
|
+
<p>Error handling: If evaluation throws (numerical instability, internal error) we return -Infinity
|
|
325
|
+
so such genomes are strongly disfavored.</p>
|
|
326
|
+
<p>Parameters:</p>
|
|
327
|
+
<ul>
|
|
328
|
+
<li><code>set</code> - Dataset of training samples.</li>
|
|
329
|
+
<li><code>cost</code> - Cost function reference (should expose error computation in genome.test).</li>
|
|
330
|
+
<li><code>amount</code> - Number of repeated evaluations to average.</li>
|
|
331
|
+
<li><code>growth</code> - Complexity penalty scalar.</li>
|
|
332
|
+
</ul>
|
|
333
|
+
<p>Returns: Function mapping a Network genome to a numeric fitness.</p>
|
|
334
|
+
<h3 id="computecomplexitypenalty">computeComplexityPenalty</h3><p><code>(genome: import("D:/code-practice/NeatapticTS/src/architecture/network").default, growth: number) => number</code></p>
|
|
335
|
+
<p>Compute a structural complexity penalty scaled by a growth factor.</p>
|
|
336
|
+
<p>Complexity heuristic:
|
|
337
|
+
(hidden nodes) + (connections) + (gates)
|
|
338
|
+
hidden nodes = total nodes - input - output (to avoid penalizing fixed I/O interface size).</p>
|
|
339
|
+
<p>Rationale: Encourages minimal / parsimonious networks by subtracting a term from fitness
|
|
340
|
+
proportional to network size, counteracting bloat. Growth hyper‑parameter tunes pressure.</p>
|
|
341
|
+
<p>Caching strategy: We memoize the base complexity (pre‑growth scaling) per genome when its
|
|
342
|
+
structural counts (nodes / connections / gates) are unchanged. This is safe because only
|
|
343
|
+
structural mutations alter these counts, and those invalidate earlier entries naturally
|
|
344
|
+
(since mutated genomes are distinct object references in typical NEAT flows).</p>
|
|
345
|
+
<p>Parameters:</p>
|
|
346
|
+
<ul>
|
|
347
|
+
<li><code>genome</code> - Candidate network whose complexity to measure.</li>
|
|
348
|
+
<li><code>growth</code> - Positive scalar controlling strength of parsimony pressure.</li>
|
|
349
|
+
</ul>
|
|
350
|
+
<p>Returns: Complexity * growth (used directly to subtract from fitness score).</p>
|
|
351
|
+
<h3 id="evolutionconfig">EvolutionConfig</h3><p>Internal evolution configuration summary (for potential logging / debugging)
|
|
352
|
+
capturing normalized option values used by the local evolutionary loop.</p>
|
|
353
|
+
<h3 id="evolvenetwork">evolveNetwork</h3><p><code>(set: TrainingSample[], options: any) => Promise<{ error: number; iterations: number; time: number; }></code></p>
|
|
354
|
+
<p>Evolve (optimize) the current network's topology and weights using a NEAT-like evolutionary loop
|
|
355
|
+
until a stopping criterion (target error or max iterations) is met.</p>
|
|
356
|
+
<p>High-level process:</p>
|
|
357
|
+
<ol>
|
|
358
|
+
<li>Validate dataset shape (input/output vector sizes must match network I/O counts).</li>
|
|
359
|
+
<li>Normalize / default option values and construct an internal configuration summary.</li>
|
|
360
|
+
<li>Build appropriate fitness evaluation function (single or multi-thread).</li>
|
|
361
|
+
<li>Initialize a Neat population (optionally with speciation) seeded by this network.</li>
|
|
362
|
+
<li>Iteratively call neat.evolve():<ul>
|
|
363
|
+
<li>Retrieve fittest genome + its fitness.</li>
|
|
364
|
+
<li>Derive an error metric from fitness (inverse relationship considering complexity penalty).</li>
|
|
365
|
+
<li>Track best genome overall (elitism) and perform logging/scheduling callbacks.</li>
|
|
366
|
+
<li>Break if error criterion satisfied or iterations exceeded.</li>
|
|
367
|
+
</ul>
|
|
368
|
+
</li>
|
|
369
|
+
<li>Replace this network's internal structural arrays with the best discovered genome's (in-place upgrade).</li>
|
|
370
|
+
<li>Cleanup any worker threads and report final statistics.</li>
|
|
371
|
+
</ol>
|
|
372
|
+
<p>Fitness / Error relationship:
|
|
373
|
+
fitness = -error - complexityPenalty => error = -(fitness + complexityPenalty)
|
|
374
|
+
We recompute error from the stored fitness plus penalty to ensure consistent reporting.</p>
|
|
375
|
+
<p>Resilience strategies:</p>
|
|
376
|
+
<ul>
|
|
377
|
+
<li>Guard against infinite / NaN errors; after MAX_INF consecutive invalid errors we abort.</li>
|
|
378
|
+
<li>Fallback for tiny populations: increase mutation aggressiveness to prevent premature convergence.</li>
|
|
379
|
+
</ul>
|
|
380
|
+
<p>Parameters:</p>
|
|
381
|
+
<ul>
|
|
382
|
+
<li><code>this</code> - Bound {@link Network} instance being evolved in-place.</li>
|
|
383
|
+
</ul>
|
|
384
|
+
<ul>
|
|
385
|
+
<li></li>
|
|
386
|
+
</ul>
|
|
387
|
+
<ul>
|
|
388
|
+
<li><code>set</code> - Supervised dataset (array of {input, output}).</li>
|
|
389
|
+
<li><code>options</code> - Evolution options (see README / docs). Key fields include:</li>
|
|
390
|
+
<li>iterations: maximum generations (if omitted must supply error target)</li>
|
|
391
|
+
<li>error: target error threshold (if omitted must supply iterations)</li>
|
|
392
|
+
<li>growth: complexity penalty scaling</li>
|
|
393
|
+
<li>amount: number of score evaluations (averaged) per genome</li>
|
|
394
|
+
<li>threads: desired worker count (>=2 enables multi-thread path if available)</li>
|
|
395
|
+
<li>popsize / populationSize: population size</li>
|
|
396
|
+
<li>schedule: { iterations: number, function: (ctx) => void } periodic callback</li>
|
|
397
|
+
<li>log: generation interval for console logging</li>
|
|
398
|
+
<li>clear: whether to call network.clear() after adopting best genome</li>
|
|
399
|
+
</ul>
|
|
400
|
+
<p>Returns: Summary object { error, iterations, time(ms) }.</p>
|
|
401
|
+
<h3 id="trainingsample">TrainingSample</h3><p>A single supervised training example used to evaluate fitness.</p>
|
|
402
|
+
<h2 id="architecture-network-network-gating-ts">architecture/network/network.gating.ts</h2><h3 id="gate">gate</h3><p><code>(node: import("D:/code-practice/NeatapticTS/src/architecture/node").default, connection: import("D:/code-practice/NeatapticTS/src/architecture/connection").default) => void</code></p>
|
|
403
|
+
<p>Gating & node removal utilities for {@link Network}.</p>
|
|
404
|
+
<p>Gating concept:</p>
|
|
405
|
+
<ul>
|
|
406
|
+
<li>A "gater" node modulates the effective weight of a target connection. Conceptually the raw
|
|
407
|
+
connection weight w is multiplied (or otherwise transformed) by a function of the gater node's
|
|
408
|
+
activation a_g (actual math lives in {@link Node.gate}). This enables dynamic, context-sensitive
|
|
409
|
+
routing (similar in spirit to attention mechanisms or LSTM-style gates) within an evolved topology.</li>
|
|
410
|
+
</ul>
|
|
411
|
+
<p>Removal strategy (removeNode):</p>
|
|
412
|
+
<ul>
|
|
413
|
+
<li>When excising a hidden node we attempt to preserve overall connectivity by creating bridging
|
|
414
|
+
connections from each of its predecessors to each of its successors if such edges do not already
|
|
415
|
+
exist. Optional logic reassigns previous gater nodes to these new edges (best-effort) to preserve
|
|
416
|
+
modulation diversity.</li>
|
|
417
|
+
</ul>
|
|
418
|
+
<p>Mutation interplay:</p>
|
|
419
|
+
<ul>
|
|
420
|
+
<li>The flag <code>mutation.SUB_NODE.keep_gates</code> determines whether gating nodes associated with edges
|
|
421
|
+
passing through the removed node should be retained and reassigned.</li>
|
|
422
|
+
</ul>
|
|
423
|
+
<p>Determinism note:</p>
|
|
424
|
+
<ul>
|
|
425
|
+
<li>Bridging gate reassignment currently uses Math.random directly; for fully deterministic runs
|
|
426
|
+
you may consider replacing with the network's seeded RNG (if provided) in future refactors.</li>
|
|
427
|
+
</ul>
|
|
428
|
+
<p>Exported functions:</p>
|
|
429
|
+
<ul>
|
|
430
|
+
<li>{@link gate}: Attach a gater to a connection.</li>
|
|
431
|
+
<li>{@link ungate}: Remove gating from a connection.</li>
|
|
432
|
+
<li>{@link removeNode}: Remove a hidden node while attempting to preserve connectivity & gating.</li>
|
|
433
|
+
</ul>
|
|
434
|
+
<h3 id="removenode">removeNode</h3><p><code>(node: import("D:/code-practice/NeatapticTS/src/architecture/node").default) => void</code></p>
|
|
435
|
+
<p>Remove a hidden node from the network while attempting to preserve functional connectivity.</p>
|
|
436
|
+
<p>Algorithm outline:</p>
|
|
437
|
+
<ol>
|
|
438
|
+
<li>Reject removal if node is input/output (structural invariants) or absent (error).</li>
|
|
439
|
+
<li>Optionally collect gating nodes (if keep_gates flag) from inbound & outbound connections.</li>
|
|
440
|
+
<li>Remove self-loop (if present) to simplify subsequent edge handling.</li>
|
|
441
|
+
<li>Disconnect all inbound edges (record their source nodes) and all outbound edges (record targets).</li>
|
|
442
|
+
<li>For every (input predecessor, output successor) pair create a new connection unless:
|
|
443
|
+
a. input === output (avoid trivial self loops) OR
|
|
444
|
+
b. an existing projection already connects them.</li>
|
|
445
|
+
<li>Reassign preserved gater nodes randomly onto newly created bridging connections.</li>
|
|
446
|
+
<li>Ungate any connections that were gated BY this node (where node acted as gater).</li>
|
|
447
|
+
<li>Remove node from network node list and flag node index cache as dirty.</li>
|
|
448
|
+
</ol>
|
|
449
|
+
<p>Complexity summary:</p>
|
|
450
|
+
<ul>
|
|
451
|
+
<li>Let I = number of inbound edges, O = number of outbound edges.</li>
|
|
452
|
+
<li>Disconnect phase: O(I + O)</li>
|
|
453
|
+
<li>Bridging phase: O(I * O) connection existence checks (isProjectingTo) + potential additions.</li>
|
|
454
|
+
<li>Gater reassignment: O(min(G, newConnections)) where G is number of preserved gaters.</li>
|
|
455
|
+
</ul>
|
|
456
|
+
<p>Preservation rationale:</p>
|
|
457
|
+
<ul>
|
|
458
|
+
<li>Reassigning gaters maintains some of the dynamic modulation capacity that would otherwise
|
|
459
|
+
be lost, aiding continuity during topology simplification.</li>
|
|
460
|
+
</ul>
|
|
461
|
+
<p>Parameters:</p>
|
|
462
|
+
<ul>
|
|
463
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
464
|
+
</ul>
|
|
465
|
+
<ul>
|
|
466
|
+
<li></li>
|
|
467
|
+
</ul>
|
|
468
|
+
<ul>
|
|
469
|
+
<li><code>node</code> - Hidden node to remove.</li>
|
|
470
|
+
</ul>
|
|
471
|
+
<h3 id="ungate">ungate</h3><p><code>(connection: import("D:/code-practice/NeatapticTS/src/architecture/connection").default) => void</code></p>
|
|
472
|
+
<p>Remove gating from a connection, restoring its static weight contribution.</p>
|
|
473
|
+
<p>Idempotent: If the connection is not currently gated, the call performs no structural changes
|
|
474
|
+
(and optionally logs a warning). After ungating, the connection's weight will be used directly
|
|
475
|
+
without modulation by a gater activation.</p>
|
|
476
|
+
<p>Complexity: O(n) where n = number of gated connections (indexOf lookup) – typically small.</p>
|
|
477
|
+
<p>Parameters:</p>
|
|
478
|
+
<ul>
|
|
479
|
+
<li><code>this</code> - Bound {@link Network} instance.</li>
|
|
480
|
+
</ul>
|
|
481
|
+
<ul>
|
|
482
|
+
<li></li>
|
|
483
|
+
</ul>
|
|
484
|
+
<ul>
|
|
485
|
+
<li><code>connection</code> - Connection to ungate.</li>
|
|
486
|
+
</ul>
|
|
487
|
+
<h2 id="architecture-network-network-genetic-ts">architecture/network/network.genetic.ts</h2><h3 id="crossover">crossOver</h3><p><code>(network1: import("D:/code-practice/NeatapticTS/src/architecture/network").default, network2: import("D:/code-practice/NeatapticTS/src/architecture/network").default, equal: boolean) => import("D:/code-practice/NeatapticTS/src/architecture/network").default</code></p>
|
|
488
|
+
<p>Genetic operator: NEAT‑style crossover (legacy merge operator removed).</p>
|
|
489
|
+
<p>This module now focuses solely on producing recombinant offspring via {@link crossOver}.
|
|
490
|
+
The previous experimental Network.merge has been removed to reduce maintenance surface area
|
|
491
|
+
and avoid implying a misleading “sequential composition” guarantee.</p>
|
|
492
|
+
<h2 id="architecture-network-network-mutate-ts">architecture/network/network.mutate.ts</h2><h3 id="addbackconn">_addBackConn</h3><p><code>() => void</code></p>
|
|
493
|
+
<p>ADD_BACK_CONN: Add a backward (recurrent) connection (acyclic mode must be off).</p>
|
|
494
|
+
<h3 id="addconn">_addConn</h3><p><code>() => void</code></p>
|
|
495
|
+
<p>ADD_CONN: Add a new forward (acyclic) connection between two previously unconnected nodes.
|
|
496
|
+
Recurrent edges are handled separately by ADD_BACK_CONN.</p>
|
|
497
|
+
<h3 id="addgate">_addGate</h3><p><code>() => void</code></p>
|
|
498
|
+
<p>ADD_GATE: Assign a random (hidden/output) node to gate a random ungated connection.</p>
|
|
499
|
+
<h3 id="addgrunode">_addGRUNode</h3><p><code>() => void</code></p>
|
|
500
|
+
<p>ADD_GRU_NODE: Replace a random connection with a minimal 1‑unit GRU block.</p>
|
|
501
|
+
<h3 id="addlstmnode">_addLSTMNode</h3><p><code>() => void</code></p>
|
|
502
|
+
<p>ADD_LSTM_NODE: Replace a random connection with a minimal 1‑unit LSTM block (macro mutation).</p>
|
|
503
|
+
<h3 id="addnode">_addNode</h3><p><code>() => void</code></p>
|
|
504
|
+
<p>ADD_NODE: Insert a new hidden node by splitting an existing connection.</p>
|
|
505
|
+
<p>Deterministic test mode (config.deterministicChainMode):</p>
|
|
506
|
+
<ul>
|
|
507
|
+
<li>Maintain an internal linear chain (input → hidden* → output).</li>
|
|
508
|
+
<li>Always split the chain's terminal edge, guaranteeing depth +1 per call.</li>
|
|
509
|
+
<li>Prune side edges from chain nodes to keep depth measurement unambiguous.</li>
|
|
510
|
+
</ul>
|
|
511
|
+
<p>Standard evolutionary mode:</p>
|
|
512
|
+
<ul>
|
|
513
|
+
<li>Sample a random existing connection and perform the classical NEAT split.</li>
|
|
514
|
+
</ul>
|
|
515
|
+
<p>Core algorithm (stochastic variant):</p>
|
|
516
|
+
<ol>
|
|
517
|
+
<li>Pick connection (random).</li>
|
|
518
|
+
<li>Disconnect it (preserve any gater reference).</li>
|
|
519
|
+
<li>Create hidden node (random activation mutation).</li>
|
|
520
|
+
<li>Insert before output tail to preserve ordering invariants.</li>
|
|
521
|
+
<li>Connect source→hidden and hidden→target.</li>
|
|
522
|
+
<li>Reassign gater uniformly to one of the new edges.</li>
|
|
523
|
+
</ol>
|
|
524
|
+
<h3 id="addselfconn">_addSelfConn</h3><p><code>() => void</code></p>
|
|
525
|
+
<p>ADD_SELF_CONN: Add a self loop to a random eligible node (only when cycles allowed).</p>
|
|
526
|
+
<h3 id="batchnorm">_batchNorm</h3><p><code>() => void</code></p>
|
|
527
|
+
<p>BATCH_NORM: Placeholder mutation – marks a random hidden node with a flag for potential
|
|
528
|
+
future batch normalization integration. Currently a no-op beyond tagging.</p>
|
|
529
|
+
<h3 id="modactivation">_modActivation</h3><p><code>(method: any) => void</code></p>
|
|
530
|
+
<p>MOD_ACTIVATION: Swap activation (squash) of a random eligible node; may exclude outputs.</p>
|
|
531
|
+
<h3 id="modbias">_modBias</h3><p><code>(method: any) => void</code></p>
|
|
532
|
+
<p>MOD_BIAS: Delegate to node.mutate to adjust bias of a random non‑input node.</p>
|
|
533
|
+
<h3 id="modweight">_modWeight</h3><p><code>(method: any) => void</code></p>
|
|
534
|
+
<p>MOD_WEIGHT: Perturb a single (possibly self) connection weight by uniform delta in [min,max].</p>
|
|
535
|
+
<h3 id="reinitweight">_reinitWeight</h3><p><code>(method: any) => void</code></p>
|
|
536
|
+
<p>REINIT_WEIGHT: Reinitialize all incoming/outgoing/self connection weights for a random node.
|
|
537
|
+
Useful as a heavy mutation to escape local minima. Falls back silently if no eligible node.</p>
|
|
538
|
+
<h3 id="subbackconn">_subBackConn</h3><p><code>() => void</code></p>
|
|
539
|
+
<p>SUB_BACK_CONN: Remove a backward connection meeting redundancy heuristics.</p>
|
|
540
|
+
<h3 id="subconn">_subConn</h3><p><code>() => void</code></p>
|
|
541
|
+
<p>SUB_CONN: Remove a forward connection chosen under redundancy heuristics to avoid disconnects.</p>
|
|
542
|
+
<h3 id="subgate">_subGate</h3><p><code>() => void</code></p>
|
|
543
|
+
<p>SUB_GATE: Remove gating from a random previously gated connection.</p>
|
|
544
|
+
<h3 id="subnode">_subNode</h3><p><code>() => void</code></p>
|
|
545
|
+
<p>SUB_NODE: Remove a random hidden node (if any remain).
|
|
546
|
+
After removal a tiny deterministic weight nudge encourages observable phenotype change in tests.</p>
|
|
547
|
+
<h3 id="subselfconn">_subSelfConn</h3><p><code>() => void</code></p>
|
|
548
|
+
<p>SUB_SELF_CONN: Remove a random existing self loop.</p>
|
|
549
|
+
<h3 id="swapnodes">_swapNodes</h3><p><code>(method: any) => void</code></p>
|
|
550
|
+
<p>SWAP_NODES: Exchange bias & activation function between two random eligible nodes.</p>
|
|
551
|
+
<h3 id="mutateimpl">mutateImpl</h3><p><code>(method: any) => void</code></p>
|
|
552
|
+
<p>Public entry point: apply a single mutation operator to the network.</p>
|
|
553
|
+
<p>Steps:</p>
|
|
554
|
+
<ol>
|
|
555
|
+
<li>Validate the supplied method (enum value or descriptor object).</li>
|
|
556
|
+
<li>Resolve helper implementation from the dispatch map (supports objects exposing name/type/identity).</li>
|
|
557
|
+
<li>Invoke helper (passing through method for parameterized operators).</li>
|
|
558
|
+
<li>Flag topology caches dirty so ordering / slabs rebuild lazily.</li>
|
|
559
|
+
</ol>
|
|
560
|
+
<p>Accepts either the raw enum value (e.g. <code>mutation.ADD_NODE</code>) or an object carrying an
|
|
561
|
+
identifying <code>name | type | identity</code> field allowing future parameterization without breaking call sites.</p>
|
|
562
|
+
<p>Parameters:</p>
|
|
563
|
+
<ul>
|
|
564
|
+
<li><code>this</code> - Network instance (bound).</li>
|
|
565
|
+
<li><code>method</code> - Mutation enum value or descriptor object.</li>
|
|
566
|
+
</ul>
|
|
567
|
+
<h2 id="architecture-network-network-onnx-ts">architecture/network/network.onnx.ts</h2><h3 id="assignactivationfunctions">assignActivationFunctions</h3><p><code>(network: import("D:/code-practice/NeatapticTS/src/architecture/network").default, onnx: import("D:/code-practice/NeatapticTS/src/architecture/network/network.onnx").OnnxModel, hiddenLayerSizes: number[]) => void</code></p>
|
|
568
|
+
<p>Map activation op_types from ONNX nodes back to internal activation functions.</p>
|
|
569
|
+
<h3 id="assignweightsandbiases">assignWeightsAndBiases</h3><p><code>(network: import("D:/code-practice/NeatapticTS/src/architecture/network").default, onnx: import("D:/code-practice/NeatapticTS/src/architecture/network/network.onnx").OnnxModel, hiddenLayerSizes: number[]) => void</code></p>
|
|
570
|
+
<p>Apply weights & biases from ONNX initializers onto the newly created network.</p>
|
|
571
|
+
<h3 id="buildonnxmodel">buildOnnxModel</h3><p><code>(network: import("D:/code-practice/NeatapticTS/src/architecture/network").default, layers: any[][]) => import("D:/code-practice/NeatapticTS/src/architecture/network/network.onnx").OnnxModel</code></p>
|
|
572
|
+
<p>Construct the ONNX model graph (initializers + nodes) given validated layers.</p>
|
|
573
|
+
<h3 id="derivehiddenlayersizes">deriveHiddenLayerSizes</h3><p><code>(initializers: OnnxTensor[]) => number[]</code></p>
|
|
574
|
+
<p>Extract hidden layer sizes from ONNX initializers (weight tensors).</p>
|
|
575
|
+
<h3 id="exporttoonnx">exportToONNX</h3><p><code>(network: import("D:/code-practice/NeatapticTS/src/architecture/network").default) => import("D:/code-practice/NeatapticTS/src/architecture/network/network.onnx").OnnxModel</code></p>
|
|
576
|
+
<p>Export a minimal multilayer perceptron Network to a lightweight ONNX JSON object.</p>
|
|
577
|
+
<p>Steps:</p>
|
|
578
|
+
<ol>
|
|
579
|
+
<li>Rebuild connection cache ensuring up-to-date adjacency.</li>
|
|
580
|
+
<li>Index nodes for error messaging.</li>
|
|
581
|
+
<li>Infer strict layer ordering (throws if structure unsupported).</li>
|
|
582
|
+
<li>Validate homogeneity & full connectivity layer-to-layer.</li>
|
|
583
|
+
<li>Build initializer tensors (weights + biases) and node list (Gemm + activation pairs).</li>
|
|
584
|
+
</ol>
|
|
585
|
+
<p>Constraints: See module doc. Throws descriptive errors when assumptions violated.</p>
|
|
586
|
+
<h3 id="importfromonnx">importFromONNX</h3><p><code>(onnx: import("D:/code-practice/NeatapticTS/src/architecture/network/network.onnx").OnnxModel) => import("D:/code-practice/NeatapticTS/src/architecture/network").default</code></p>
|
|
587
|
+
<p>Import a model previously produced by {@link exportToONNX} into a fresh Network instance.</p>
|
|
588
|
+
<p>Steps:</p>
|
|
589
|
+
<ol>
|
|
590
|
+
<li>Read input/output dimensions.</li>
|
|
591
|
+
<li>Derive hidden layer sizes from weight tensor shapes.</li>
|
|
592
|
+
<li>Create corresponding MLP with identical layer counts.</li>
|
|
593
|
+
<li>Assign weights & biases.</li>
|
|
594
|
+
<li>Map activation op_types back to internal activation functions.</li>
|
|
595
|
+
<li>Rebuild flat connection list.</li>
|
|
596
|
+
</ol>
|
|
597
|
+
<p>Limitations: Only guaranteed for self-produced ONNX; inconsistent naming or ordering will break.</p>
|
|
598
|
+
<h3 id="inferlayerordering">inferLayerOrdering</h3><p><code>(network: import("D:/code-practice/NeatapticTS/src/architecture/network").default) => any[][]</code></p>
|
|
599
|
+
<p>Infer strictly layered ordering from a network, ensuring feed-forward fully-connected structure.</p>
|
|
600
|
+
<h3 id="mapactivationtoonnx">mapActivationToOnnx</h3><p><code>(squash: any) => string</code></p>
|
|
601
|
+
<p>Map an internal activation function (squash) to an ONNX op_type, defaulting to Identity.</p>
|
|
602
|
+
<h3 id="onnxmodel">OnnxModel</h3><h3 id="rebuildconnectionslocal">rebuildConnectionsLocal</h3><p><code>(networkLike: any) => void</code></p>
|
|
603
|
+
<p>Rebuild the network's flat connections array from each node's outgoing list (avoids circular import).</p>
|
|
604
|
+
<h3 id="validatelayerhomogeneityandconnectivity">validateLayerHomogeneityAndConnectivity</h3><p><code>(layers: any[][], network: import("D:/code-practice/NeatapticTS/src/architecture/network").default) => void</code></p>
|
|
605
|
+
<p>Validate that each non-input layer has homogeneous activation and is fully connected from previous layer.</p>
|
|
606
|
+
<h2 id="architecture-network-network-prune-ts">architecture/network/network.prune.ts</h2><h3 id="getcurrentsparsity">getCurrentSparsity</h3><p><code>() => number</code></p>
|
|
607
|
+
<p>Current sparsity fraction relative to the training-time pruning baseline.</p>
|
|
608
|
+
<h3 id="maybeprune">maybePrune</h3><p><code>(iteration: number) => void</code></p>
|
|
609
|
+
<p>Opportunistically perform scheduled pruning during gradient-based training.</p>
|
|
610
|
+
<p>Scheduling model:</p>
|
|
611
|
+
<ul>
|
|
612
|
+
<li>start / end define an iteration window (inclusive) during which pruning may occur</li>
|
|
613
|
+
<li>frequency defines cadence (every N iterations inside the window)</li>
|
|
614
|
+
<li>targetSparsity is linearly annealed from 0 to its final value across the window</li>
|
|
615
|
+
<li>method chooses ranking heuristic (magnitude | snip)</li>
|
|
616
|
+
<li>optional regrowFraction allows dynamic sparse training: after removing edges we probabilistically regrow
|
|
617
|
+
a fraction of them at random unused positions (respecting acyclic constraint if enforced)</li>
|
|
618
|
+
</ul>
|
|
619
|
+
<p>SNIP heuristic:</p>
|
|
620
|
+
<ul>
|
|
621
|
+
<li>Uses |w * grad| style saliency approximation (here reusing stored delta stats as gradient proxy)</li>
|
|
622
|
+
<li>Falls back to pure magnitude if gradient stats absent.</li>
|
|
623
|
+
</ul>
|
|
624
|
+
<h3 id="prunetosparsity">pruneToSparsity</h3><p><code>(targetSparsity: number, method: "magnitude" | "snip") => void</code></p>
|
|
625
|
+
<p>Evolutionary (generation-based) pruning toward a target sparsity baseline.
|
|
626
|
+
Unlike maybePrune this operates immediately relative to the first invocation's connection count
|
|
627
|
+
(stored separately as _evoInitialConnCount) and does not implement scheduling or regrowth.</p>
|
|
628
|
+
<h3 id="rankconnections">rankConnections</h3><p><code>(conns: import("D:/code-practice/NeatapticTS/src/architecture/connection").default[], method: "magnitude" | "snip") => import("D:/code-practice/NeatapticTS/src/architecture/connection").default[]</code></p>
|
|
629
|
+
<p>Structured and dynamic pruning utilities for networks.</p>
|
|
630
|
+
<p>Features:</p>
|
|
631
|
+
<ul>
|
|
632
|
+
<li>Scheduled pruning during gradient-based training ({@link maybePrune}) with linear sparsity ramp.</li>
|
|
633
|
+
<li>Evolutionary generation pruning toward a target sparsity ({@link pruneToSparsity}).</li>
|
|
634
|
+
<li>Two ranking heuristics:
|
|
635
|
+
magnitude: |w|
|
|
636
|
+
snip: |w * g| approximation (g approximated via accumulated delta stats; falls back to |w|)</li>
|
|
637
|
+
<li>Optional stochastic regrowth during scheduled pruning (dynamic sparse training), preserving acyclic constraints.</li>
|
|
638
|
+
</ul>
|
|
639
|
+
<p>Internal State Fields (attached to Network via <code>any</code> casting):</p>
|
|
640
|
+
<ul>
|
|
641
|
+
<li>_pruningConfig: user-specified schedule & options (start, end, frequency, targetSparsity, method, regrowFraction, lastPruneIter)</li>
|
|
642
|
+
<li>_initialConnectionCount: baseline connection count captured outside (first training iteration)</li>
|
|
643
|
+
<li>_evoInitialConnCount: baseline for evolutionary pruning (first invocation of pruneToSparsity)</li>
|
|
644
|
+
<li>_rand: deterministic RNG function</li>
|
|
645
|
+
<li>_enforceAcyclic: boolean flag enforcing forward-only connectivity ordering</li>
|
|
646
|
+
<li>_topoDirty: topology order invalidation flag consumed by activation fast path / topological sorting</li>
|
|
647
|
+
</ul>
|
|
648
|
+
<h3 id="regrowconnections">regrowConnections</h3><p><code>(network: import("D:/code-practice/NeatapticTS/src/architecture/network").default, desiredRemaining: number, maxAttempts: number) => void</code></p>
|
|
649
|
+
<p>Attempt stochastic regrowth of pruned connections up to a desired remaining count.</p>
|
|
650
|
+
<h2 id="architecture-network-network-remove-ts">architecture/network/network.remove.ts</h2><h3 id="removenode">removeNode</h3><p><code>(node: import("D:/code-practice/NeatapticTS/src/architecture/node").default) => void</code></p>
|
|
651
|
+
<p>Node removal utilities.</p>
|
|
652
|
+
<p>This module provides a focused implementation for removing a single hidden node from a network
|
|
653
|
+
while attempting to preserve overall functional connectivity. The removal procedure mirrors the
|
|
654
|
+
legacy Neataptic logic but augments it with clearer documentation and explicit invariants.</p>
|
|
655
|
+
<p>High‑level algorithm (removeNode):</p>
|
|
656
|
+
<ol>
|
|
657
|
+
<li>Guard: ensure the node exists and is not an input or output (those are structural anchors).</li>
|
|
658
|
+
<li>Ungate: detach any connections gated BY the node (we don't currently reassign gater roles).</li>
|
|
659
|
+
<li>Snapshot inbound / outbound connections (before mutation of adjacency lists).</li>
|
|
660
|
+
<li>Disconnect all inbound, outbound, and self connections.</li>
|
|
661
|
+
<li>Physically remove the node from the network's node array.</li>
|
|
662
|
+
<li>Simple path repair heuristic: for every former inbound source and outbound target, add a
|
|
663
|
+
direct connection if (a) both endpoints still exist, (b) they are distinct, and (c) no
|
|
664
|
+
direct connection already exists. This keeps forward information flow possibilities.</li>
|
|
665
|
+
<li>Mark topology / caches dirty so that subsequent activation / ordering passes rebuild state.</li>
|
|
666
|
+
</ol>
|
|
667
|
+
<p>Notes / Limitations:</p>
|
|
668
|
+
<ul>
|
|
669
|
+
<li>We do NOT attempt to clone weights or distribute the removed node's function across new
|
|
670
|
+
connections (more sophisticated strategies could average or compose weights).</li>
|
|
671
|
+
<li>Gating effects involving the removed node as a gater are dropped; downstream behavior may
|
|
672
|
+
change—callers relying heavily on gating may want a custom remap strategy.</li>
|
|
673
|
+
<li>Self connections are simply removed; no attempt is made to emulate recursion via alternative
|
|
674
|
+
structures.</li>
|
|
675
|
+
</ul>
|
|
676
|
+
<h2 id="architecture-network-network-serialize-ts">architecture/network/network.serialize.ts</h2><h3 id="deserialize">deserialize</h3><p><code>(data: any[], inputSize: number | undefined, outputSize: number | undefined) => import("D:/code-practice/NeatapticTS/src/architecture/network").default</code></p>
|
|
677
|
+
<p>Static counterpart to {@link serialize}. Rebuilds a Network from the compact tuple form.
|
|
678
|
+
Accepts optional explicit input/output size overrides (useful when piping through evolvers that trim IO).</p>
|
|
679
|
+
<h3 id="fromjsonimpl">fromJSONImpl</h3><p><code>(json: any) => import("D:/code-practice/NeatapticTS/src/architecture/network").default</code></p>
|
|
680
|
+
<p>Reconstruct a Network from the verbose JSON produced by {@link toJSONImpl} (formatVersion 2).
|
|
681
|
+
Defensive parsing retains forward compatibility (warns on unknown versions rather than aborting).</p>
|
|
682
|
+
<h3 id="network-serialize">network.serialize</h3><h3 id="serialize">serialize</h3><p><code>() => any[]</code></p>
|
|
683
|
+
<p>Serialization & deserialization helpers for Network instances.</p>
|
|
684
|
+
<p>Provides two independent formats:</p>
|
|
685
|
+
<ol>
|
|
686
|
+
<li>Compact tuple (serialize/deserialize): optimized for fast structured clone / worker transfer.</li>
|
|
687
|
+
<li>Verbose JSON (toJSONImpl/fromJSONImpl): stable, versioned representation retaining structural genes.</li>
|
|
688
|
+
</ol>
|
|
689
|
+
<p>Compact tuple format layout:
|
|
690
|
+
[ activations: number[], states: number[], squashes: string[],
|
|
691
|
+
connections: { from:number; to:number; weight:number; gater:number|null }[],
|
|
692
|
+
inputSize: number, outputSize: number ]</p>
|
|
693
|
+
<p>Design Principles:</p>
|
|
694
|
+
<ul>
|
|
695
|
+
<li>Avoid deep nested objects to reduce serialization overhead.</li>
|
|
696
|
+
<li>Use current node ordering as canonical index mapping (caller must keep ordering stable between peers).</li>
|
|
697
|
+
<li>Include current activation/state for scenarios resuming partially evaluated populations.</li>
|
|
698
|
+
<li>Self connections placed in the same array as normal connections for uniform reconstruction.</li>
|
|
699
|
+
</ul>
|
|
700
|
+
<p>Verbose JSON (formatVersion = 2) adds:</p>
|
|
701
|
+
<ul>
|
|
702
|
+
<li>Enabled flag for connections (innovation toggling).</li>
|
|
703
|
+
<li>Stable geneId (if tracked) on nodes.</li>
|
|
704
|
+
<li>Dropout probability.</li>
|
|
705
|
+
</ul>
|
|
706
|
+
<p>Future Ideas:</p>
|
|
707
|
+
<ul>
|
|
708
|
+
<li>Delta / patch serialization for large evolving populations.</li>
|
|
709
|
+
<li>Compressed binary packing (e.g., Float32Array segments) for WASM pipelines.</li>
|
|
710
|
+
</ul>
|
|
711
|
+
<h3 id="tojsonimpl">toJSONImpl</h3><p><code>() => object</code></p>
|
|
712
|
+
<p>Verbose JSON export (stable formatVersion). Omits transient runtime fields but keeps structural genetics.
|
|
713
|
+
formatVersion=2 adds: enabled flags, stable geneId (if present), dropout value.</p>
|
|
714
|
+
<h3 id="default">default</h3><h4 id="acquire">acquire</h4><p><code>(from: import("D:/code-practice/NeatapticTS/src/architecture/node").default, to: import("D:/code-practice/NeatapticTS/src/architecture/node").default, weight: number | undefined) => import("D:/code-practice/NeatapticTS/src/architecture/connection").default</code></p>
|
|
715
|
+
<p>Acquire a Connection from the pool or construct a new one. Ensures fresh innovation id.</p>
|
|
716
|
+
<h4 id="innovationid">innovationID</h4><p><code>(a: number, b: number) => number</code></p>
|
|
717
|
+
<p>Generates a unique innovation ID for the connection.</p>
|
|
718
|
+
<p>The innovation ID is calculated using the Cantor pairing function, which maps two integers
|
|
719
|
+
(representing the source and target nodes) to a unique integer.</p>
|
|
720
|
+
<p>Parameters:</p>
|
|
721
|
+
<ul>
|
|
722
|
+
<li><code>a</code> - The ID of the source node.</li>
|
|
723
|
+
<li><code>b</code> - The ID of the target node.</li>
|
|
724
|
+
</ul>
|
|
725
|
+
<p>Returns: The innovation ID based on the Cantor pairing function.</p>
|
|
726
|
+
<h4 id="release">release</h4><p><code>(conn: import("D:/code-practice/NeatapticTS/src/architecture/connection").default) => void</code></p>
|
|
727
|
+
<p>Return a Connection to the pool for reuse.</p>
|
|
728
|
+
<h4 id="tojson">toJSON</h4><p><code>() => any</code></p>
|
|
729
|
+
<p>Converts the connection to a JSON object for serialization.</p>
|
|
730
|
+
<p>Returns: A JSON representation of the connection.</p>
|
|
731
|
+
<h2 id="architecture-network-network-slab-ts">architecture/network/network.slab.ts</h2><h3 id="canusefastslab">canUseFastSlab</h3><p><code>(training: boolean) => boolean</code></p>
|
|
732
|
+
<p>Public helper: indicates whether fast slab path is currently viable.</p>
|
|
733
|
+
<h3 id="fastslabactivate">fastSlabActivate</h3><p><code>(input: number[]) => number[]</code></p>
|
|
734
|
+
<p>High-performance forward pass using packed slabs + CSR adjacency.
|
|
735
|
+
Falls back to generic activate if prerequisites unavailable.</p>
|
|
736
|
+
<h3 id="getconnectionslab">getConnectionSlab</h3><p><code>() => { weights: any; from: any; to: any; }</code></p>
|
|
737
|
+
<p>Return current slab (building lazily).</p>
|
|
738
|
+
<h3 id="rebuildconnectionslab">rebuildConnectionSlab</h3><p><code>(force: boolean) => void</code></p>
|
|
739
|
+
<p>Fast slab (structure-of-arrays) acceleration layer.</p>
|
|
740
|
+
<p>Rationale:
|
|
741
|
+
Typical neural network graphs represented as object graphs incur significant overhead during
|
|
742
|
+
forward passes due to pointer chasing (cache misses) and dynamic property lookups. For large
|
|
743
|
+
evolving populations where topologies change infrequently compared to evaluation frequency,
|
|
744
|
+
we can amortize a one-off packing cost into contiguous typed arrays, dramatically improving
|
|
745
|
+
memory locality and enabling tight inner loops.</p>
|
|
746
|
+
<p>Core Data Structures:</p>
|
|
747
|
+
<ul>
|
|
748
|
+
<li>weightArray (Float32Array|Float64Array): connection weights</li>
|
|
749
|
+
<li>fromIndexArray (Uint32Array): source node indices per connection</li>
|
|
750
|
+
<li>toIndexArray (Uint32Array): destination node indices per connection</li>
|
|
751
|
+
<li>outgoingStartIndices (Uint32Array length = nodeCount + 1): CSR row pointer style offsets</li>
|
|
752
|
+
<li>outgoingOrder (Uint32Array): permutation of connection indices grouped by source node</li>
|
|
753
|
+
</ul>
|
|
754
|
+
<p>Workflow:</p>
|
|
755
|
+
<ol>
|
|
756
|
+
<li>rebuildConnectionSlab packs connections into SoA arrays when dirty.</li>
|
|
757
|
+
<li>_buildAdjacency converts fromIndexArray into CSR-like adjacency for each source node.</li>
|
|
758
|
+
<li>fastSlabActivate uses the packed arrays + precomputed topological order to perform a forward pass
|
|
759
|
+
with minimal branching and object access.</li>
|
|
760
|
+
</ol>
|
|
761
|
+
<p>Constraints for Fast Path (_canUseFastSlab):</p>
|
|
762
|
+
<ul>
|
|
763
|
+
<li>Acyclic enforced (no recurrence) so single topological sweep suffices.</li>
|
|
764
|
+
<li>No gating, self-connections, dropout, stochastic depth, or per-hidden noise.</li>
|
|
765
|
+
<li>Topological order and node indices must be clean.</li>
|
|
766
|
+
</ul>
|
|
767
|
+
<p>Dirty Flags Touched:</p>
|
|
768
|
+
<ul>
|
|
769
|
+
<li>_slabDirty: slab arrays need rebuild</li>
|
|
770
|
+
<li>_adjDirty: adjacency mapping (CSR) invalid</li>
|
|
771
|
+
<li>_nodeIndexDirty: node.index values invalid</li>
|
|
772
|
+
<li>_topoDirty: topological ordering invalid</li>
|
|
773
|
+
</ul>
|
|
774
|
+
<h2 id="architecture-network-network-standalone-ts">architecture/network/network.standalone.ts</h2><h3 id="generatestandalone">generateStandalone</h3><p><code>(net: import("D:/code-practice/NeatapticTS/src/architecture/network").default) => string</code></p>
|
|
775
|
+
<p>Generate a standalone JavaScript source string that returns an <code>activate(input:number[])</code> function.</p>
|
|
776
|
+
<p>Implementation Steps:</p>
|
|
777
|
+
<ol>
|
|
778
|
+
<li>Validate presence of output nodes (must produce something observable).</li>
|
|
779
|
+
<li>Assign stable sequential indices to nodes (used as array offsets in generated code).</li>
|
|
780
|
+
<li>Collect initial activation/state values into typed array initializers for warm starting.</li>
|
|
781
|
+
<li>For each non-input node, build a line computing S[i] (pre-activation sum with bias) and A[i]
|
|
782
|
+
(post-activation output). Gating multiplies activation by gate activations; self-connection adds
|
|
783
|
+
recurrent term S[i] * weight before activation.</li>
|
|
784
|
+
<li>De-duplicate activation functions: each unique squash name is emitted once; references become
|
|
785
|
+
indices into array F of function references for compactness.</li>
|
|
786
|
+
<li>Emit an IIFE producing the activate function with internal arrays A (activations) and S (states).</li>
|
|
787
|
+
</ol>
|
|
788
|
+
<p>Parameters:</p>
|
|
789
|
+
<ul>
|
|
790
|
+
<li><code>net</code> - Network instance to snapshot.</li>
|
|
791
|
+
</ul>
|
|
792
|
+
<p>Returns: Source string (ES5-compatible) – safe to eval in sandbox to obtain activate function.</p>
|
|
793
|
+
<h2 id="architecture-network-network-stats-ts">architecture/network/network.stats.ts</h2><h3 id="deepclonevalue">deepCloneValue</h3><p><code>(value: T) => T</code></p>
|
|
794
|
+
<p>Network statistics accessors.</p>
|
|
795
|
+
<p>Currently exposes a single helper for retrieving the most recent regularization / stochasticity
|
|
796
|
+
metrics snapshot recorded during training or evaluation. The internal <code>_lastStats</code> field (on the
|
|
797
|
+
Network instance, typed as any) is expected to be populated elsewhere in the training loop with
|
|
798
|
+
values such as:</p>
|
|
799
|
+
<ul>
|
|
800
|
+
<li>l1Penalty, l2Penalty</li>
|
|
801
|
+
<li>dropoutApplied (fraction of units dropped last pass)</li>
|
|
802
|
+
<li>weightNoiseStd (effective std dev used if noise injected)</li>
|
|
803
|
+
<li>sparsityRatio, prunedConnections</li>
|
|
804
|
+
<li>any custom user extensions (object is not strictly typed to allow experimentation)</li>
|
|
805
|
+
</ul>
|
|
806
|
+
<p>Design decision: We return a deep copy to prevent external mutation of internal accounting state.
|
|
807
|
+
If the object is large and copying becomes a bottleneck, future versions could offer a freeze
|
|
808
|
+
option or incremental diff interface.</p>
|
|
809
|
+
<h3 id="getregularizationstats">getRegularizationStats</h3><p><code>() => any</code></p>
|
|
810
|
+
<p>Obtain the last recorded regularization / stochastic statistics snapshot.</p>
|
|
811
|
+
<p>Returns a defensive deep copy so callers can inspect metrics without risking mutation of the
|
|
812
|
+
internal <code>_lastStats</code> object maintained by the training loop (e.g., during pruning, dropout, or
|
|
813
|
+
noise scheduling updates).</p>
|
|
814
|
+
<p>Returns: A deep-cloned stats object or null if no stats have been recorded yet.</p>
|
|
815
|
+
<h2 id="architecture-network-network-topology-ts">architecture/network/network.topology.ts</h2><h3 id="computetopoorder">computeTopoOrder</h3><p><code>() => void</code></p>
|
|
816
|
+
<p>Topology utilities.</p>
|
|
817
|
+
<p>Provides:</p>
|
|
818
|
+
<ul>
|
|
819
|
+
<li>computeTopoOrder: Kahn-style topological sorting with graceful fallback when cycles detected.</li>
|
|
820
|
+
<li>hasPath: depth-first reachability query (used to prevent cycle introduction when acyclicity enforced).</li>
|
|
821
|
+
</ul>
|
|
822
|
+
<p>Design Notes:</p>
|
|
823
|
+
<ul>
|
|
824
|
+
<li>We deliberately tolerate cycles by falling back to raw node ordering instead of throwing; this
|
|
825
|
+
allows callers performing interim structural mutations to proceed (e.g. during evolve phases)
|
|
826
|
+
while signaling that the fast acyclic optimizations should not be used.</li>
|
|
827
|
+
<li>Input nodes are seeded into the queue immediately regardless of in-degree to keep them early in
|
|
828
|
+
the ordering even if an unusual inbound edge was added (defensive redundancy).</li>
|
|
829
|
+
<li>Self loops are ignored for in-degree accounting and queue progression (they neither unlock new
|
|
830
|
+
nodes nor should they block ordering completion).</li>
|
|
831
|
+
</ul>
|
|
832
|
+
<h3 id="haspath">hasPath</h3><p><code>(from: import("D:/code-practice/NeatapticTS/src/architecture/node").default, to: import("D:/code-practice/NeatapticTS/src/architecture/node").default) => boolean</code></p>
|
|
833
|
+
<p>Depth-first reachability test (avoids infinite loops via visited set).</p>
|
|
834
|
+
<h2 id="architecture-network-network-training-ts">architecture/network/network.training.ts</h2><h3 id="traininginternals">__trainingInternals</h3><h3 id="applygradientclippingimpl">applyGradientClippingImpl</h3><p><code>(net: import("D:/code-practice/NeatapticTS/src/architecture/network").default, cfg: { mode: "norm" | "percentile" | "layerwiseNorm" | "layerwisePercentile"; maxNorm?: number | undefined; percentile?: number | undefined; }) => void</code></p>
|
|
835
|
+
<p>Apply gradient clipping to accumulated connection deltas / bias deltas.</p>
|
|
836
|
+
<p>Modes:</p>
|
|
837
|
+
<ul>
|
|
838
|
+
<li>norm / layerwiseNorm: L2 norm scaling (global vs per group).</li>
|
|
839
|
+
<li>percentile / layerwisePercentile: element-wise clamp at absolute percentile threshold.</li>
|
|
840
|
+
</ul>
|
|
841
|
+
<p>Grouping:</p>
|
|
842
|
+
<ul>
|
|
843
|
+
<li>If layerwise* and net.layers exists -> each defined layer is a group.</li>
|
|
844
|
+
<li>Else if layerwise* -> each non-input node becomes its own group.</li>
|
|
845
|
+
<li>Otherwise a single global group containing all learnable params.</li>
|
|
846
|
+
</ul>
|
|
847
|
+
<h3 id="applyoptimizerstep">applyOptimizerStep</h3><p><code>(net: import("D:/code-practice/NeatapticTS/src/architecture/network").default, optimizer: any, currentRate: number, momentum: number, internalNet: any) => number</code></p>
|
|
848
|
+
<p>Apply optimizer update step across all nodes; returns gradient L2 norm (approx).</p>
|
|
849
|
+
<h3 id="averageaccumulatedgradients">averageAccumulatedGradients</h3><p><code>(net: import("D:/code-practice/NeatapticTS/src/architecture/network").default, accumulationSteps: number) => void</code></p>
|
|
850
|
+
<p>Divide accumulated gradients by accumulationSteps (average reduction mode).</p>
|
|
851
|
+
<h3 id="checkpointconfig">CheckpointConfig</h3><p>Checkpoint callback spec.</p>
|
|
852
|
+
<h3 id="computemonitorederror">computeMonitoredError</h3><p><code>(trainError: number, recentErrors: number[], cfg: MonitoredSmoothingConfig, state: PrimarySmoothingState) => number</code></p>
|
|
853
|
+
<p>Compute the monitored (primary) smoothed error given recent raw errors.</p>
|
|
854
|
+
<p>Behavior:</p>
|
|
855
|
+
<ul>
|
|
856
|
+
<li>For SMA-like strategies uses the supplied window slice directly.</li>
|
|
857
|
+
<li>For EMA it mutates state.emaValue.</li>
|
|
858
|
+
<li>For adaptive-ema maintains dual EMA tracks inside state and returns the min for stability.</li>
|
|
859
|
+
<li>For median / gaussian / trimmed / wma applies algorithmic weighting as documented inline.</li>
|
|
860
|
+
</ul>
|
|
861
|
+
<p>Inputs:</p>
|
|
862
|
+
<ul>
|
|
863
|
+
<li>trainError: Current raw mean error for this iteration.</li>
|
|
864
|
+
<li>recentErrors: Chronological array (oldest->newest) of last N raw errors.</li>
|
|
865
|
+
<li>cfg: Algorithm selection + parameters.</li>
|
|
866
|
+
<li>state: Mutable smoothing state (ema / adaptive fields updated in-place).</li>
|
|
867
|
+
</ul>
|
|
868
|
+
<p>Returns: Smoothed/monitored error metric (may equal trainError if no smoothing active).</p>
|
|
869
|
+
<h3 id="computeplateaumetric">computePlateauMetric</h3><p><code>(trainError: number, plateauErrors: number[], cfg: PlateauSmoothingConfig, state: PlateauSmoothingState) => number</code></p>
|
|
870
|
+
<p>Compute plateau metric (may differ in strategy from primary monitored error).
|
|
871
|
+
Only algorithms actually supported for plateau in current pipeline are SMA, median and EMA.
|
|
872
|
+
Provided flexibility keeps room for extension; unsupported types silently fall back to the mean.</p>
|
|
873
|
+
<h3 id="costfunction">CostFunction</h3><p><code>(target: number[], output: number[]) => number</code></p>
|
|
874
|
+
<hr>
|
|
875
|
+
<h2 id="internal-type-definitions-documentation-only-optional-for-callers">Internal Type Definitions (documentation only; optional for callers)</h2><h3 id="detectmixedprecisionoverflow">detectMixedPrecisionOverflow</h3><p><code>(net: import("D:/code-practice/NeatapticTS/src/architecture/network").default, internalNet: any) => boolean</code></p>
|
|
876
|
+
<p>Detect mixed precision overflow (NaN / Inf) in bias values if mixed precision enabled.
|
|
877
|
+
Side-effect: may clear internal trigger _forceNextOverflow.</p>
|
|
878
|
+
<h3 id="gradientclipconfig">GradientClipConfig</h3><p>Gradient clipping configuration accepted by options.gradientClip.</p>
|
|
879
|
+
<h3 id="handleoverflow">handleOverflow</h3><p><code>(internalNet: any) => void</code></p>
|
|
880
|
+
<p>Respond to a mixed precision overflow by shrinking loss scale & bookkeeping.</p>
|
|
881
|
+
<h3 id="maybeincreaselossscale">maybeIncreaseLossScale</h3><p><code>(internalNet: any) => void</code></p>
|
|
882
|
+
<p>Update dynamic loss scaling after a successful (non-overflow) optimizer step.</p>
|
|
883
|
+
<h3 id="metricshook">MetricsHook</h3><p><code>(m: { iteration: number; error: number; plateauError?: number | undefined; gradNorm: number; }) => void</code></p>
|
|
884
|
+
<p>Metrics hook signature.</p>
|
|
885
|
+
<h3 id="mixedprecisionconfig">MixedPrecisionConfig</h3><h3 id="mixedprecisiondynamicconfig">MixedPrecisionDynamicConfig</h3><p>Mixed precision configuration.</p>
|
|
886
|
+
<h3 id="monitoredsmoothingconfig">MonitoredSmoothingConfig</h3><p>Configuration passed to monitored (primary) smoothing computation.</p>
|
|
887
|
+
<h3 id="movingaveragetype">MovingAverageType</h3><p>Moving average strategy identifiers.</p>
|
|
888
|
+
<h3 id="optimizerconfigbase">OptimizerConfigBase</h3><p>Optimizer configuration (subset – delegated to node.applyBatchUpdatesWithOptimizer).</p>
|
|
889
|
+
<h3 id="plateausmoothingconfig">PlateauSmoothingConfig</h3><p>Configuration for plateau smoothing computation.</p>
|
|
890
|
+
<h3 id="plateausmoothingstate">PlateauSmoothingState</h3><p>State container for plateau EMA smoothing.</p>
|
|
891
|
+
<h3 id="primarysmoothingstate">PrimarySmoothingState</h3><hr>
|
|
892
|
+
<h2 id="internal-helper-utilities-non-exported">Internal Helper Utilities (non-exported)</h2><p>These functions encapsulate cohesive sub-steps of the training pipeline so the
|
|
893
|
+
main exported functions remain readable while preserving original behavior.
|
|
894
|
+
Each helper is intentionally pure where reasonable or documents its side-effects.</p>
|
|
895
|
+
<h3 id="scheduleconfig">ScheduleConfig</h3><p>Schedule hook executed every N iterations.</p>
|
|
896
|
+
<h3 id="trainimpl">trainImpl</h3><p><code>(net: import("D:/code-practice/NeatapticTS/src/architecture/network").default, set: { input: number[]; output: number[]; }[], options: import("D:/code-practice/NeatapticTS/src/architecture/network/network.training").TrainingOptions) => { error: number; iterations: number; time: number; }</code></p>
|
|
897
|
+
<p>High-level training orchestration with early stopping, smoothing & callbacks.</p>
|
|
898
|
+
<h3 id="trainingoptions">TrainingOptions</h3><p>Primary training options object (public shape).</p>
|
|
899
|
+
<h3 id="trainsetimpl">trainSetImpl</h3><p><code>(net: import("D:/code-practice/NeatapticTS/src/architecture/network").default, set: { input: number[]; output: number[]; }[], batchSize: number, accumulationSteps: number, currentRate: number, momentum: number, regularization: any, costFunction: (target: number[], output: number[]) => number, optimizer: any) => number</code></p>
|
|
900
|
+
<p>Execute one full pass over dataset (epoch) with optional accumulation & adaptive optimizer.
|
|
901
|
+
Returns mean cost across processed samples.</p>
|
|
902
|
+
<h3 id="zeroaccumulatedgradients">zeroAccumulatedGradients</h3><p><code>(net: import("D:/code-practice/NeatapticTS/src/architecture/network").default) => void</code></p>
|
|
903
|
+
<p>Zero-out accumulated gradient buffers after an overflow to discard invalid updates.</p>
|
|
904
|
+
|
|
905
|
+
<footer>Generated from JSDoc. <a href="https://github.com/reicek/NeatapticTS">GitHub</a></footer>
|
|
906
|
+
</main>
|
|
907
|
+
<aside class="page-index"><div class="page-toc"><h2>Files</h2><div class="toc-file"><a href="#architecture-network-network-activate-ts">architecture/network/network.activate.ts</a><ul><li><a href=#activatebatch>activateBatch</a></li><li><a href=#activateraw>activateRaw</a></li><li><a href=#notraceactivate>noTraceActivate</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-connect-ts">architecture/network/network.connect.ts</a><ul><li><a href=#connect>connect</a></li><li><a href=#disconnect>disconnect</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-deterministic-ts">architecture/network/network.deterministic.ts</a><ul><li><a href=#getrandomfn>getRandomFn</a></li><li><a href=#getrngstate>getRNGState</a></li><li><a href=#restorerng>restoreRNG</a></li><li><a href=#rngsnapshot>RNGSnapshot</a></li><li><a href=#setrngstate>setRNGState</a></li><li><a href=#setseed>setSeed</a></li><li><a href=#snapshotrng>snapshotRNG</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-evolve-ts">architecture/network/network.evolve.ts</a><ul><li><a href=#buildmultithreadfitness>buildMultiThreadFitness</a></li><li><a href=#buildsinglethreadfitness>buildSingleThreadFitness</a></li><li><a href=#computecomplexitypenalty>computeComplexityPenalty</a></li><li><a href=#evolutionconfig>EvolutionConfig</a></li><li><a href=#evolvenetwork>evolveNetwork</a></li><li><a href=#trainingsample>TrainingSample</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-gating-ts">architecture/network/network.gating.ts</a><ul><li><a href=#gate>gate</a></li><li><a href=#removenode>removeNode</a></li><li><a href=#ungate>ungate</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-genetic-ts">architecture/network/network.genetic.ts</a><ul><li><a href=#crossover>crossOver</a></li></ul></div><div class="toc-file"><a 
href="#architecture-network-network-mutate-ts">architecture/network/network.mutate.ts</a><ul><li><a href=#addbackconn>_addBackConn</a></li><li><a href=#addconn>_addConn</a></li><li><a href=#addgate>_addGate</a></li><li><a href=#addgrunode>_addGRUNode</a></li><li><a href=#addlstmnode>_addLSTMNode</a></li><li><a href=#addnode>_addNode</a></li><li><a href=#addselfconn>_addSelfConn</a></li><li><a href=#batchnorm>_batchNorm</a></li><li><a href=#modactivation>_modActivation</a></li><li><a href=#modbias>_modBias</a></li><li><a href=#modweight>_modWeight</a></li><li><a href=#reinitweight>_reinitWeight</a></li><li><a href=#subbackconn>_subBackConn</a></li><li><a href=#subconn>_subConn</a></li><li><a href=#subgate>_subGate</a></li><li><a href=#subnode>_subNode</a></li><li><a href=#subselfconn>_subSelfConn</a></li><li><a href=#swapnodes>_swapNodes</a></li><li><a href=#mutateimpl>mutateImpl</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-onnx-ts">architecture/network/network.onnx.ts</a><ul><li><a href=#assignactivationfunctions>assignActivationFunctions</a></li><li><a href=#assignweightsandbiases>assignWeightsAndBiases</a></li><li><a href=#buildonnxmodel>buildOnnxModel</a></li><li><a href=#derivehiddenlayersizes>deriveHiddenLayerSizes</a></li><li><a href=#exporttoonnx>exportToONNX</a></li><li><a href=#importfromonnx>importFromONNX</a></li><li><a href=#inferlayerordering>inferLayerOrdering</a></li><li><a href=#mapactivationtoonnx>mapActivationToOnnx</a></li><li><a href=#onnxmodel>OnnxModel</a></li><li><a href=#rebuildconnectionslocal>rebuildConnectionsLocal</a></li><li><a href=#validatelayerhomogeneityandconnectivity>validateLayerHomogeneityAndConnectivity</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-prune-ts">architecture/network/network.prune.ts</a><ul><li><a href=#getcurrentsparsity>getCurrentSparsity</a></li><li><a href=#maybeprune>maybePrune</a></li><li><a 
href=#prunetosparsity>pruneToSparsity</a></li><li><a href=#rankconnections>rankConnections</a></li><li><a href=#regrowconnections>regrowConnections</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-remove-ts">architecture/network/network.remove.ts</a><ul><li><a href=#removenode>removeNode</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-serialize-ts">architecture/network/network.serialize.ts</a><ul><li><a href=#deserialize>deserialize</a></li><li><a href=#fromjsonimpl>fromJSONImpl</a></li><li><a href=#serialize>serialize</a></li><li><a href=#tojsonimpl>toJSONImpl</a></li><li><a href=#default>default</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-slab-ts">architecture/network/network.slab.ts</a><ul><li><a href=#canusefastslab>canUseFastSlab</a></li><li><a href=#fastslabactivate>fastSlabActivate</a></li><li><a href=#getconnectionslab>getConnectionSlab</a></li><li><a href=#rebuildconnectionslab>rebuildConnectionSlab</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-standalone-ts">architecture/network/network.standalone.ts</a><ul><li><a href=#generatestandalone>generateStandalone</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-stats-ts">architecture/network/network.stats.ts</a><ul><li><a href=#deepclonevalue>deepCloneValue</a></li><li><a href=#getregularizationstats>getRegularizationStats</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-topology-ts">architecture/network/network.topology.ts</a><ul><li><a href=#computetopoorder>computeTopoOrder</a></li><li><a href=#haspath>hasPath</a></li></ul></div><div class="toc-file"><a href="#architecture-network-network-training-ts">architecture/network/network.training.ts</a><ul><li><a href=#traininginternals>__trainingInternals</a></li><li><a href=#applygradientclippingimpl>applyGradientClippingImpl</a></li><li><a 
href=#applyoptimizerstep>applyOptimizerStep</a></li><li><a href=#averageaccumulatedgradients>averageAccumulatedGradients</a></li><li><a href=#checkpointconfig>CheckpointConfig</a></li><li><a href=#computemonitorederror>computeMonitoredError</a></li><li><a href=#computeplateaumetric>computePlateauMetric</a></li><li><a href=#costfunction>CostFunction</a></li><li><a href=#detectmixedprecisionoverflow>detectMixedPrecisionOverflow</a></li><li><a href=#gradientclipconfig>GradientClipConfig</a></li><li><a href=#handleoverflow>handleOverflow</a></li><li><a href=#maybeincreaselossscale>maybeIncreaseLossScale</a></li><li><a href=#metricshook>MetricsHook</a></li><li><a href=#mixedprecisionconfig>MixedPrecisionConfig</a></li><li><a href=#mixedprecisiondynamicconfig>MixedPrecisionDynamicConfig</a></li><li><a href=#monitoredsmoothingconfig>MonitoredSmoothingConfig</a></li><li><a href=#movingaveragetype>MovingAverageType</a></li><li><a href=#optimizerconfigbase>OptimizerConfigBase</a></li><li><a href=#plateausmoothingconfig>PlateauSmoothingConfig</a></li><li><a href=#plateausmoothingstate>PlateauSmoothingState</a></li><li><a href=#primarysmoothingstate>PrimarySmoothingState</a></li><li><a href=#scheduleconfig>ScheduleConfig</a></li><li><a href=#trainimpl>trainImpl</a></li><li><a href=#trainingoptions>TrainingOptions</a></li><li><a href=#trainsetimpl>trainSetImpl</a></li><li><a href=#zeroaccumulatedgradients>zeroAccumulatedGradients</a></li></ul></div></div></aside>
|
|
908
|
+
</body></html>
|