@spiky-panda/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/geometry/geometry.cartesian.d.ts +56 -0
- package/dist/geometry/geometry.cartesian.js +229 -0
- package/dist/geometry/geometry.cartesian.js.map +1 -0
- package/dist/geometry/geometry.interfaces.d.ts +37 -0
- package/dist/geometry/geometry.interfaces.js +31 -0
- package/dist/geometry/geometry.interfaces.js.map +1 -0
- package/dist/geometry/index.d.ts +2 -0
- package/dist/geometry/index.js +3 -0
- package/dist/geometry/index.js.map +1 -0
- package/dist/graph/graph.builder.graph.d.ts +44 -0
- package/dist/graph/graph.builder.graph.js +78 -0
- package/dist/graph/graph.builder.graph.js.map +1 -0
- package/dist/graph/graph.builder.node.d.ts +20 -0
- package/dist/graph/graph.builder.node.js +45 -0
- package/dist/graph/graph.builder.node.js.map +1 -0
- package/dist/graph/graph.builder.olink.d.ts +12 -0
- package/dist/graph/graph.builder.olink.js +32 -0
- package/dist/graph/graph.builder.olink.js.map +1 -0
- package/dist/graph/graph.graph.d.ts +13 -0
- package/dist/graph/graph.graph.js +26 -0
- package/dist/graph/graph.graph.js.map +1 -0
- package/dist/graph/graph.graphItem.d.ts +14 -0
- package/dist/graph/graph.graphItem.js +36 -0
- package/dist/graph/graph.graphItem.js.map +1 -0
- package/dist/graph/graph.interfaces.builder.d.ts +28 -0
- package/dist/graph/graph.interfaces.builder.js +2 -0
- package/dist/graph/graph.interfaces.builder.js.map +1 -0
- package/dist/graph/graph.interfaces.d.ts +59 -0
- package/dist/graph/graph.interfaces.js +51 -0
- package/dist/graph/graph.interfaces.js.map +1 -0
- package/dist/graph/graph.node.d.ts +12 -0
- package/dist/graph/graph.node.js +22 -0
- package/dist/graph/graph.node.js.map +1 -0
- package/dist/graph/graph.olink.d.ts +13 -0
- package/dist/graph/graph.olink.js +56 -0
- package/dist/graph/graph.olink.js.map +1 -0
- package/dist/graph/index.d.ts +9 -0
- package/dist/graph/index.js +10 -0
- package/dist/graph/index.js.map +1 -0
- package/dist/index.d.ts +6 -0
- package/dist/index.js +15 -0
- package/dist/index.js.map +1 -0
- package/dist/neuralnetwork/ann/index.d.ts +1 -0
- package/dist/neuralnetwork/ann/index.js +2 -0
- package/dist/neuralnetwork/ann/index.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/index.d.ts +9 -0
- package/dist/neuralnetwork/ann/mlp/index.js +10 -0
- package/dist/neuralnetwork/ann/mlp/index.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/mlp.activation.d.ts +22 -0
- package/dist/neuralnetwork/ann/mlp/mlp.activation.js +22 -0
- package/dist/neuralnetwork/ann/mlp/mlp.activation.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/mlp.builder.d.ts +43 -0
- package/dist/neuralnetwork/ann/mlp/mlp.builder.js +168 -0
- package/dist/neuralnetwork/ann/mlp/mlp.builder.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/mlp.graph.d.ts +7 -0
- package/dist/neuralnetwork/ann/mlp/mlp.graph.js +7 -0
- package/dist/neuralnetwork/ann/mlp/mlp.graph.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/mlp.inference.d.ts +13 -0
- package/dist/neuralnetwork/ann/mlp/mlp.inference.js +63 -0
- package/dist/neuralnetwork/ann/mlp/mlp.inference.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/mlp.interfaces.d.ts +21 -0
- package/dist/neuralnetwork/ann/mlp/mlp.interfaces.js +7 -0
- package/dist/neuralnetwork/ann/mlp/mlp.interfaces.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/mlp.neuron.d.ts +10 -0
- package/dist/neuralnetwork/ann/mlp/mlp.neuron.js +20 -0
- package/dist/neuralnetwork/ann/mlp/mlp.neuron.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/mlp.runtime.utils.d.ts +5 -0
- package/dist/neuralnetwork/ann/mlp/mlp.runtime.utils.js +48 -0
- package/dist/neuralnetwork/ann/mlp/mlp.runtime.utils.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/mlp.synapse.d.ts +6 -0
- package/dist/neuralnetwork/ann/mlp/mlp.synapse.js +7 -0
- package/dist/neuralnetwork/ann/mlp/mlp.synapse.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/training/index.d.ts +4 -0
- package/dist/neuralnetwork/ann/mlp/training/index.js +5 -0
- package/dist/neuralnetwork/ann/mlp/training/index.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.loss.d.ts +10 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.loss.js +11 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.loss.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.optimizers.d.ts +7 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.optimizers.js +59 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.optimizers.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.training.d.ts +22 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.training.interfaces.d.ts +27 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.training.interfaces.js +2 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.training.interfaces.js.map +1 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.training.js +107 -0
- package/dist/neuralnetwork/ann/mlp/training/mlp.training.js.map +1 -0
- package/dist/neuralnetwork/index.d.ts +8 -0
- package/dist/neuralnetwork/index.js +9 -0
- package/dist/neuralnetwork/index.js.map +1 -0
- package/dist/neuralnetwork/nn.builders.d.ts +21 -0
- package/dist/neuralnetwork/nn.builders.js +71 -0
- package/dist/neuralnetwork/nn.builders.js.map +1 -0
- package/dist/neuralnetwork/nn.interfaces.builder.d.ts +16 -0
- package/dist/neuralnetwork/nn.interfaces.builder.js +2 -0
- package/dist/neuralnetwork/nn.interfaces.builder.js.map +1 -0
- package/dist/neuralnetwork/nn.interfaces.d.ts +16 -0
- package/dist/neuralnetwork/nn.interfaces.js +18 -0
- package/dist/neuralnetwork/nn.interfaces.js.map +1 -0
- package/dist/neuralnetwork/nn.neuron.d.ts +8 -0
- package/dist/neuralnetwork/nn.neuron.js +10 -0
- package/dist/neuralnetwork/nn.neuron.js.map +1 -0
- package/dist/neuralnetwork/nn.synapse.d.ts +6 -0
- package/dist/neuralnetwork/nn.synapse.js +14 -0
- package/dist/neuralnetwork/nn.synapse.js.map +1 -0
- package/dist/neuralnetwork/nn.weights.d.ts +42 -0
- package/dist/neuralnetwork/nn.weights.js +120 -0
- package/dist/neuralnetwork/nn.weights.js.map +1 -0
- package/dist/neuralnetwork/snn/index.d.ts +3 -0
- package/dist/neuralnetwork/snn/index.js +4 -0
- package/dist/neuralnetwork/snn/index.js.map +1 -0
- package/dist/neuralnetwork/snn/spike.interfaces.d.ts +50 -0
- package/dist/neuralnetwork/snn/spike.interfaces.js +24 -0
- package/dist/neuralnetwork/snn/spike.interfaces.js.map +1 -0
- package/dist/neuralnetwork/snn/spike.runtime.d.ts +31 -0
- package/dist/neuralnetwork/snn/spike.runtime.js +90 -0
- package/dist/neuralnetwork/snn/spike.runtime.js.map +1 -0
- package/dist/neuralnetwork/snn/spike.stdp.d.ts +11 -0
- package/dist/neuralnetwork/snn/spike.stdp.js +33 -0
- package/dist/neuralnetwork/snn/spike.stdp.js.map +1 -0
- package/dist/types.d.ts +3 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/csv.d.ts +6 -0
- package/dist/utils/csv.js +36 -0
- package/dist/utils/csv.js.map +1 -0
- package/dist/utils/index.d.ts +1 -0
- package/dist/utils/index.js +2 -0
- package/dist/utils/index.js.map +1 -0
- package/package.json +44 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"mlp.optimizers.js","sourceRoot":"","sources":["../../../../../src/neuralnetwork/ann/mlp/training/mlp.optimizers.ts"],"names":[],"mappings":"AAAA,aAAa;AACb,+CAA+C;AAI/C,MAAM,OAAO,UAAU;;AACnB,cAAc;AACA,cAAG,GAAG,GAAe,EAAE,CAAC,CAAC;IACnC,KAAK,CAAC,OAAO,EAAE,EAAE,EAAE,QAAQ,EAAE,GAAG;QAC5B,MAAM,GAAG,GAAG,CAAC,OAAO,CAAC,GAAG,KAAK,EAAE,QAAQ,EAAE,QAAQ,EAAE,CAA4B,CAAC;QAEhF,GAAG,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACxB,GAAG,CAAC,WAAW,GAAG,CAAC,EAAE,GAAG,QAAQ,CAAC;QAEjC,OAAO,CAAC,MAAM,IAAI,GAAG,CAAC,WAAW,CAAC;IACtC,CAAC;CACJ,CAAC,CAAC;AAEH,aAAa;AACb,+BAA+B;AAC/B,cAAc;AACA,sBAAW,GAAG,CAAC,QAAgB,EAAc,EAAE,CAAC,CAAC;IAC3D,KAAK,CAAC,OAAO,EAAE,EAAE,EAAE,QAAQ,EAAE,GAAG;QAC5B,MAAM,GAAG,GAAG,CAAC,OAAO,CAAC,GAAG,KAAK,EAAE,QAAQ,EAAE,QAAQ,EAAE,CAA4B,CAAC;QAEhF,GAAG,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACxB,GAAG,CAAC,QAAQ,KAAK,CAAC,CAAC;QAEnB,GAAG,CAAC,QAAQ,GAAG,QAAQ,GAAG,GAAG,CAAC,QAAQ,GAAG,EAAE,GAAG,QAAQ,CAAC;QACvD,GAAG,CAAC,WAAW,GAAG,GAAG,CAAC,QAAQ,CAAC;QAE/B,OAAO,CAAC,MAAM,IAAI,GAAG,CAAC,WAAW,CAAC;IACtC,CAAC;CACJ,CAAC,CAAC;AAEH,aAAa;AACb,iDAAiD;AACjD,cAAc;AACA,cAAG,GAAG,CAAC,WAAmB,GAAG,EAAc,EAAE,CAAC,CAAC;IACzD,KAAK,CAAC,OAAO,EAAE,EAAE,EAAE,QAAQ,EAAE,GAAG;QAC5B,MAAM,GAAG,GAAG,CAAC,OAAO,CAAC,GAAG,KAAK,EAAE,QAAQ,EAAE,QAAQ,EAAE,CAA4B,CAAC;QAEhF,GAAG,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACxB,GAAG,CAAC,QAAQ,KAAK,CAAC,CAAC;QAEnB,GAAG,CAAC,QAAQ,GAAG,QAAQ,GAAG,GAAG,CAAC,QAAQ,GAAG,EAAE,GAAG,QAAQ,CAAC;QACvD,GAAG,CAAC,WAAW,GAAG,GAAG,CAAC,QAAQ,CAAC;QAE/B,OAAO,CAAC,MAAM,IAAI,GAAG,CAAC,WAAW,CAAC;QAClC,GAAG,CAAC,eAAe,GAAG,SAAS,CAAC;IACpC,CAAC;CACJ,CAAC,CAAC;AAEH,aAAa;AACb,uCAAuC;AACvC,cAAc;AACA,eAAI,GAAG,CAAC,KAAK,GAAG,GAAG,EAAE,KAAK,GAAG,KAAK,EAAE,OAAO,GAAG,IAAI,EAAc,EAAE,CAAC,CAAC;IAC9E,KAAK,CAAC,OAAO,EAAE,EAAE,EAAE,QAAQ,EAAE,GAAG;QAC5B,MAAM,GAAG,GAAG,CAAC,OAAO,CAAC,GAAG,KAAK,EAAE,QAAQ,EAAE,QAAQ,EAAE,CAA4B,CAAC;QAChF,MAAM,CAAC,GAAG,GAAG,CAAC,SAAS,GAAG,CAAC,CAAC;QAE5B,GAAG,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACxB,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC;QACZ,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC;QAEZ,GAAG,CAAC,CAAC,GAAG,KAAK,GAAG,GAAG,
CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,GAAG,QAAQ,CAAC;QAC/C,GAAG,CAAC,CAAC,GAAG,KAAK,GAAG,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,GAAG,QAAQ,GAAG,QAAQ,CAAC;QAE1D,MAAM,IAAI,GAAG,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC;QAC9C,MAAM,IAAI,GAAG,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC;QAE9C,GAAG,CAAC,WAAW,GAAG,CAAC,CAAC,EAAE,GAAG,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,OAAO,CAAC,CAAC;QAC7D,OAAO,CAAC,MAAM,IAAI,GAAG,CAAC,WAAW,CAAC;IACtC,CAAC;CACJ,CAAC,CAAC","sourcesContent":["/// <summary>\r\n/// Stochastic Gradient Descent (SGD) optimizer\r\n\r\nimport { IBackpropSynapseContext, IOptimizer } from \"./mlp.training.interfaces\";\r\n\r\nexport class Optimizers {\r\n /// </summary>\r\n public static SGD = (): IOptimizer => ({\r\n apply(synapse, lr, gradient, ctx) {\r\n const bag = (synapse.bag ??= { gradient: gradient }) as IBackpropSynapseContext;\r\n\r\n bag.gradient = gradient;\r\n bag.weightDelta = -lr * gradient;\r\n\r\n synapse.weight += bag.weightDelta;\r\n },\r\n });\r\n\r\n /// <summary>\r\n /// SGD with Momentum optimizer\r\n /// </summary>\r\n public static MomentumSGD = (momentum: number): IOptimizer => ({\r\n apply(synapse, lr, gradient, ctx) {\r\n const bag = (synapse.bag ??= { gradient: gradient }) as IBackpropSynapseContext;\r\n\r\n bag.gradient = gradient;\r\n bag.velocity ??= 0;\r\n\r\n bag.velocity = momentum * bag.velocity - lr * gradient;\r\n bag.weightDelta = bag.velocity;\r\n\r\n synapse.weight += bag.weightDelta;\r\n },\r\n });\r\n\r\n /// <summary>\r\n /// Nesterov Accelerated Gradient (NAG) optimizer\r\n /// </summary>\r\n public static NAG = (momentum: number = 0.9): IOptimizer => ({\r\n apply(synapse, lr, gradient, ctx) {\r\n const bag = (synapse.bag ??= { gradient: gradient }) as IBackpropSynapseContext;\r\n\r\n bag.gradient = gradient;\r\n bag.velocity ??= 0;\r\n\r\n bag.velocity = momentum * bag.velocity - lr * gradient;\r\n 
bag.weightDelta = bag.velocity;\r\n\r\n synapse.weight += bag.weightDelta;\r\n bag.prelookedWeight = undefined;\r\n },\r\n });\r\n\r\n /// <summary>\r\n /// Adam optimizer with bias correction\r\n /// </summary>\r\n public static Adam = (beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8): IOptimizer => ({\r\n apply(synapse, lr, gradient, ctx) {\r\n const bag = (synapse.bag ??= { gradient: gradient }) as IBackpropSynapseContext;\r\n const t = ctx.iteration + 1;\r\n\r\n bag.gradient = gradient;\r\n bag.m ??= 0;\r\n bag.v ??= 0;\r\n\r\n bag.m = beta1 * bag.m + (1 - beta1) * gradient;\r\n bag.v = beta2 * bag.v + (1 - beta2) * gradient * gradient;\r\n\r\n const mHat = bag.m / (1 - Math.pow(beta1, t));\r\n const vHat = bag.v / (1 - Math.pow(beta2, t));\r\n\r\n bag.weightDelta = (-lr * mHat) / (Math.sqrt(vHat) + epsilon);\r\n synapse.weight += bag.weightDelta;\r\n },\r\n });\r\n}\r\n"]}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { IMlpGraph } from "../mlp.interfaces";
import { MLPInferenceRuntime } from "../mlp.inference";
import { ILossFunction, IOptimizer, ITrainingContext } from "./mlp.training.interfaces";
/**
 * Handles backpropagation and weight updates for an MLP graph.
 *
 * Drives training by running a forward pass through the inference runtime,
 * back-propagating the loss, and applying weight deltas via the configured
 * optimizer.
 */
export declare class MLPTrainingRuntime {
    /** Graph being trained (nodes, links, hidden and output layers). */
    readonly graph: IMlpGraph;
    /** Inference runtime used for the forward pass inside trainStep(). */
    readonly runtime: MLPInferenceRuntime;
    /** Loss function providing loss() and its derivative dLoss(). */
    readonly lossFn: ILossFunction;
    /** Step size used by the optimizer and by the bias update. */
    readonly learningRate: number;
    /** Strategy that applies per-synapse weight updates. */
    readonly optimizer: IOptimizer;
    /** Mutable training context; iteration advances once per trainStep(). */
    private context;
    constructor(graph: IMlpGraph, runtime: MLPInferenceRuntime, lossFn: ILossFunction, learningRate: number, optimizer: IOptimizer);
    /**
     * Runs a forward + backward pass and updates weights.
     * @returns the total loss summed over the output neurons for this step.
     */
    trainStep(inputs: number[], expected: number[]): number;
    /**
     * Performs backpropagation and stores gradients in neuron and synapse bags.
     * This version properly propagates gradients through all layers of the network.
     */
    private _backpropagate;
    /** Applies stored gradients: optimizer for weights, plain SGD for biases. */
    private _applyGradients;
    /** Read-only view of the current training context (iteration, epoch, …). */
    get trainingContext(): Readonly<ITrainingContext>;
    /** Resets backprop state in every neuron/synapse bag (bags are kept). */
    clearContext(): void;
    /** Discards every neuron/synapse bag entirely (sets them to undefined). */
    deleteContext(): void;
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import { IInferenceNeuronContext, IMlpSynapse } from "../mlp.interfaces";
/**
 * Per-neuron training state stored in the neuron's bag during backpropagation.
 */
export interface IBackpropNeuronContext extends IInferenceNeuronContext {
    /** Raw output error (expected − output); set for output-layer neurons. */
    error: number;
    /** Gradient of the loss w.r.t. the neuron's pre-activation (∂L/∂z). */
    gradient?: number;
}
/**
 * Represents the runtime training context for a synapse during backpropagation.
 */
export interface IBackpropSynapseContext {
    /** Gradient of the loss with respect to the synaptic weight. */
    gradient: number;
    /** Velocity term used in momentum-based optimizers (e.g., SGD with momentum, NAG). */
    velocity?: number;
    /** First moment estimate (mean of gradients), used by the Adam optimizer. */
    m?: number;
    /** Second moment estimate (uncentered variance of gradients), used by the Adam optimizer. */
    v?: number;
    /** Anticipated weight used during the forward pass (for NAG optimizers). */
    prelookedWeight?: number;
    /** Suggested weight update (usually learningRate × gradient), may be applied after batch or optimization step. */
    weightDelta?: number;
}
/**
 * A differentiable loss: loss() measures error, dLoss() is ∂loss/∂output.
 */
export interface ILossFunction {
    loss(output: number, expected: number): number;
    dLoss(output: number, expected: number): number;
}
/**
 * Defines a strategy for applying weight updates to a synapse during training.
 */
export interface IOptimizer {
    /** Applies the weight update based on gradient and internal context. */
    apply(synapse: IMlpSynapse, learningRate: number, gradient: number, context: ITrainingContext): void;
}
/**
 * Mutable bookkeeping shared across a training run.
 */
export interface ITrainingContext {
    /** Number of completed training steps; incremented once per trainStep(). */
    iteration: number;
    epoch?: number;
    batchIndex?: number;
    batchSize?: number;
    loss?: number;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"mlp.training.interfaces.js","sourceRoot":"","sources":["../../../../../src/neuralnetwork/ann/mlp/training/mlp.training.interfaces.ts"],"names":[],"mappings":"","sourcesContent":["import { IInferenceNeuronContext, IMlpSynapse } from \"../mlp.interfaces\";\r\n\r\nexport interface IBackpropNeuronContext extends IInferenceNeuronContext {\r\n error: number;\r\n gradient?: number;\r\n}\r\n\r\n/// <summary>\r\n/// Represents the runtime training context for a synapse during backpropagation.\r\n/// </summary>\r\nexport interface IBackpropSynapseContext {\r\n /// <summary>Gradient of the loss with respect to the synaptic weight</summary>\r\n gradient: number;\r\n\r\n /// <summary>Velocity term used in momentum-based optimizers (e.g., SGD with momentum, NAG)</summary>\r\n velocity?: number;\r\n\r\n /// <summary>First moment estimate (mean of gradients), used by the Adam optimizer</summary>\r\n m?: number;\r\n\r\n /// <summary>Second moment estimate (uncentered variance of gradients), used by the Adam optimizer</summary>\r\n v?: number;\r\n\r\n /// <summary>Anticipated weight used during the forward pass (for NAG optimizers)</summary>\r\n prelookedWeight?: number;\r\n\r\n /// <summary>Suggested weight update (usually learningRate × gradient), may be applied after batch or optimization step</summary>\r\n weightDelta?: number;\r\n}\r\n\r\nexport interface ILossFunction {\r\n loss(output: number, expected: number): number;\r\n dLoss(output: number, expected: number): number;\r\n}\r\n\r\n/// <summary>\r\n/// Defines a strategy for applying weight updates to a synapse during training.\r\n/// </summary>\r\nexport interface IOptimizer {\r\n /// <summary>Applies the weight update based on gradient and internal context</summary>\r\n apply(synapse: IMlpSynapse, learningRate: number, gradient: number, context: ITrainingContext): void;\r\n}\r\nexport interface ITrainingContext {\r\n iteration: number;\r\n epoch?: number;\r\n batchIndex?: number;\r\n batchSize?: 
number;\r\n loss?: number;\r\n}\r\n"]}
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
import { MLPRuntimeUtils } from "../mlp.runtime.utils";
/// <summary>
/// Handles backpropagation and weight updates for an MLP graph.
/// </summary>
export class MLPTrainingRuntime {
    /**
     * @param graph        MLP graph exposing nodes, links, hiddens and outputs.
     * @param runtime      Inference runtime used for the forward pass.
     * @param lossFn       Loss function providing loss() and dLoss().
     * @param learningRate Step size for the optimizer and the bias update.
     * @param optimizer    Strategy that applies weight deltas to synapses.
     */
    constructor(graph, runtime, lossFn, learningRate, optimizer) {
        this.graph = graph;
        this.runtime = runtime;
        this.lossFn = lossFn;
        this.learningRate = learningRate;
        this.optimizer = optimizer;
        // Shared mutable context; iteration advances once per trainStep().
        this.context = { iteration: 0 };
    }
    /// <summary>
    /// Runs a forward + backward pass and updates weights.
    /// </summary>
    trainStep(inputs, expected) {
        const outputs = this.runtime.run(inputs);
        const loss = this._backpropagate(outputs, expected);
        this._applyGradients();
        this.context.iteration++;
        return loss;
    }
    /**
     * Performs backpropagation and stores gradients in neuron and synapse bags.
     * This version properly propagates gradients through all layers of the network.
     *
     * NOTE(review): the `outputs` parameter is never read — activations are
     * taken from each neuron's bag, which the forward pass is assumed to have
     * populated (presumably by MLPInferenceRuntime.run — confirm).
     *
     * @returns total loss summed over the output neurons.
     */
    _backpropagate(outputs, expected) {
        let totalLoss = 0;
        // STEP 1 – Output layer: compute error and gradient
        for (let i = 0; i < this.graph.outputs.length; i++) {
            const neuron = this.graph.outputs[i];
            const y = expected[i];
            // Lazily create the bag; `activation` must already be there from the forward pass.
            const bag = (neuron.bag ??= {});
            const output = bag.activation;
            const loss = this.lossFn.loss(output, y);
            totalLoss += loss;
            const dLoss = this.lossFn.dLoss(output, y); // ∂L/∂o
            // Per-neuron activation overrides the runtime's main activation when present.
            const activationPrime = (neuron.activationFn ?? this.runtime.mainActivation).derivative;
            bag.gradient = dLoss * activationPrime(output); // ∂L/∂z
            bag.error = y - output;
        }
        // STEP 2 – Hidden layers (walked back-to-front): gradient = f'(a) · Σ w·δ(downstream)
        for (let i = this.graph.hiddens.length - 1; i >= 0; i--) {
            const neuron = this.graph.hiddens[i];
            const bag = (neuron.bag ??= {});
            const activation = bag.activation;
            const activationPrime = (neuron.activationFn ?? this.runtime.mainActivation).derivative;
            let downstreamSum = 0;
            // onsc() = outgoing synapses; ofin = the synapse's target neuron.
            for (const syn of neuron.onsc() ?? []) {
                const to = syn.ofin;
                const toBag = to.bag;
                downstreamSum += syn.weight * (toBag?.gradient ?? 0);
            }
            bag.gradient = activationPrime(activation) * downstreamSum;
        }
        // STEP 3 – Compute ∂L/∂w on every synapse. NOTE(review): the value is
        // overwritten, not accumulated — there is no batch accumulation here.
        for (const syn of this.graph.links) {
            const from = syn.oini; // source neuron
            const to = syn.ofin;   // target neuron
            const fromBag = from.bag;
            const toBag = to.bag;
            const synBag = (syn.bag ??= {});
            synBag.gradient = (toBag?.gradient ?? 0) * (fromBag?.activation ?? 0);
        }
        return totalLoss;
    }
    /// <summary>
    /// Applies weight updates using the selected optimizer.
    /// </summary>
    _applyGradients() {
        // Update synapse weights
        for (const synapse of this.graph.links) {
            const ctx = synapse.bag;
            if (ctx?.gradient !== undefined) {
                this.optimizer.apply(synapse, this.learningRate, ctx.gradient, this.context);
            }
        }
        // Update neuron biases. NOTE(review): biases always use plain SGD here,
        // even when a momentum/Adam optimizer is configured for the weights.
        // Input neurons are skipped implicitly: their bags carry no gradient.
        for (const neuron of this.graph.nodes) {
            const ctx = neuron.bag;
            if (ctx?.gradient !== undefined) {
                neuron.bias -= this.learningRate * ctx.gradient;
            }
        }
    }
    // Read-only view of the live context object (not a defensive copy).
    get trainingContext() {
        return this.context;
    }
    // Resets backprop state in every bag while keeping the bags themselves.
    clearContext() {
        for (const neuron of this.graph.nodes) {
            MLPRuntimeUtils.resetBackpropContext(neuron);
        }
        for (const synapse of this.graph.links) {
            MLPRuntimeUtils.resetBackpropContext(synapse);
        }
    }
    // Discards every bag entirely; a fresh forward pass must rebuild them.
    deleteContext() {
        for (const neuron of this.graph.nodes) {
            neuron.bag = undefined;
        }
        for (const synapse of this.graph.links) {
            synapse.bag = undefined;
        }
    }
}
//# sourceMappingURL=mlp.training.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"mlp.training.js","sourceRoot":"","sources":["../../../../../src/neuralnetwork/ann/mlp/training/mlp.training.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AAEvD,aAAa;AACb,gEAAgE;AAChE,cAAc;AACd,MAAM,OAAO,kBAAkB;IAG3B,YACoB,KAAgB,EAChB,OAA4B,EAC5B,MAAqB,EACrB,YAAoB,EACpB,SAAqB;QAJrB,UAAK,GAAL,KAAK,CAAW;QAChB,YAAO,GAAP,OAAO,CAAqB;QAC5B,WAAM,GAAN,MAAM,CAAe;QACrB,iBAAY,GAAZ,YAAY,CAAQ;QACpB,cAAS,GAAT,SAAS,CAAY;QAPjC,YAAO,GAAqB,EAAE,SAAS,EAAE,CAAC,EAAE,CAAC;IAQlD,CAAC;IAEJ,aAAa;IACb,uDAAuD;IACvD,cAAc;IACP,SAAS,CAAC,MAAgB,EAAE,QAAkB;QACjD,MAAM,OAAO,GAAG,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;QACzC,MAAM,IAAI,GAAG,IAAI,CAAC,cAAc,CAAC,OAAO,EAAE,QAAQ,CAAC,CAAC;QACpD,IAAI,CAAC,eAAe,EAAE,CAAC;QACvB,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,CAAC;QACzB,OAAO,IAAI,CAAC;IAChB,CAAC;IAED;;;OAGG;IACK,cAAc,CAAC,OAAiB,EAAE,QAAkB;QACxD,IAAI,SAAS,GAAG,CAAC,CAAC;QAElB,oDAAoD;QACpD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YACjD,MAAM,MAAM,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAe,CAAC;YACnD,MAAM,CAAC,GAAG,QAAQ,CAAC,CAAC,CAAC,CAAC;YAEtB,MAAM,GAAG,GAAG,CAAC,MAAM,CAAC,GAAG,KAAK,EAAE,CAA2B,CAAC;YAC1D,MAAM,MAAM,GAAG,GAAG,CAAC,UAAU,CAAC;YAE9B,MAAM,IAAI,GAAG,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC;YACzC,SAAS,IAAI,IAAI,CAAC;YAElB,MAAM,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC,QAAQ;YACpD,MAAM,eAAe,GAAG,CAAC,MAAM,CAAC,YAAY,IAAI,IAAI,CAAC,OAAO,CAAC,cAAc,CAAC,CAAC,UAAU,CAAC;YAExF,GAAG,CAAC,QAAQ,GAAG,KAAK,GAAG,eAAe,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ;YACxD,GAAG,CAAC,KAAK,GAAG,CAAC,GAAG,MAAM,CAAC;QAC3B,CAAC;QAED,6DAA6D;QAC7D,KAAK,IAAI,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC;YACtD,MAAM,MAAM,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAe,CAAC;YACnD,MAAM,GAAG,GAAG,CAAC,MAAM,CAAC,GAAG,KAAK,EAAE,CAA2B,CAAC;YAC1D,MAAM,UAAU,GAAG,GAAG,CAAC,UAAU,CAAC;YAClC,MAAM,eAAe,GAAG,CAAC,MAAM,CAAC,YAAY,IAAI,IAAI,CAAC,OAAO,CAAC,cAAc,CAAC
,CAAC,UAAU,CAAC;YAExF,IAAI,aAAa,GAAG,CAAC,CAAC;YACtB,KAAK,MAAM,GAAG,IAAI,MAAM,CAAC,IAAI,EAAe,IAAI,EAAE,EAAE,CAAC;gBACjD,MAAM,EAAE,GAAG,GAAG,CAAC,IAAkB,CAAC;gBAClC,MAAM,KAAK,GAAG,EAAE,CAAC,GAA6B,CAAC;gBAC/C,aAAa,IAAI,GAAG,CAAC,MAAM,GAAG,CAAC,KAAK,EAAE,QAAQ,IAAI,CAAC,CAAC,CAAC;YACzD,CAAC;YAED,GAAG,CAAC,QAAQ,GAAG,eAAe,CAAC,UAAU,CAAC,GAAG,aAAa,CAAC;QAC/D,CAAC;QAED,+CAA+C;QAC/C,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;YACjC,MAAM,IAAI,GAAG,GAAG,CAAC,IAAkB,CAAC;YACpC,MAAM,EAAE,GAAG,GAAG,CAAC,IAAkB,CAAC;YAElC,MAAM,OAAO,GAAG,IAAI,CAAC,GAA6B,CAAC;YACnD,MAAM,KAAK,GAAG,EAAE,CAAC,GAA6B,CAAC;YAE/C,MAAM,MAAM,GAAG,CAAC,GAAG,CAAC,GAAG,KAAK,EAAE,CAA4B,CAAC;YAC3D,MAAM,CAAC,QAAQ,GAAG,CAAC,KAAK,EAAE,QAAQ,IAAI,CAAC,CAAC,GAAG,CAAC,OAAO,EAAE,UAAU,IAAI,CAAC,CAAC,CAAC;QAC1E,CAAC;QAED,OAAO,SAAS,CAAC;IACrB,CAAC;IAED,aAAa;IACb,wDAAwD;IACxD,cAAc;IACN,eAAe;QACnB,yBAAyB;QACzB,KAAK,MAAM,OAAO,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;YACrC,MAAM,GAAG,GAAG,OAAO,CAAC,GAA8B,CAAC;YACnD,IAAI,GAAG,EAAE,QAAQ,KAAK,SAAS,EAAE,CAAC;gBAC9B,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC,OAAO,EAAE,IAAI,CAAC,YAAY,EAAE,GAAG,CAAC,QAAQ,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC;YACjF,CAAC;QACL,CAAC;QAED,uBAAuB;QACvB,KAAK,MAAM,MAAM,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;YACpC,MAAM,GAAG,GAAG,MAAM,CAAC,GAA6B,CAAC;YACjD,IAAI,GAAG,EAAE,QAAQ,KAAK,SAAS,EAAE,CAAC;gBAC9B,MAAM,CAAC,IAAI,IAAI,IAAI,CAAC,YAAY,GAAG,GAAG,CAAC,QAAQ,CAAC;YACpD,CAAC;QACL,CAAC;IACL,CAAC;IACD,IAAI,eAAe;QACf,OAAO,IAAI,CAAC,OAAO,CAAC;IACxB,CAAC;IAEM,YAAY;QACf,KAAK,MAAM,MAAM,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;YACpC,eAAe,CAAC,oBAAoB,CAAC,MAAM,CAAC,CAAC;QACjD,CAAC;QACD,KAAK,MAAM,OAAO,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;YACrC,eAAe,CAAC,oBAAoB,CAAC,OAAO,CAAC,CAAC;QAClD,CAAC;IACL,CAAC;IAEM,aAAa;QAChB,KAAK,MAAM,MAAM,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;YACpC,MAAM,CAAC,GAAG,GAAG,SAAS,CAAC;QAC3B,CAAC;QACD,KAAK,MAAM,OAAO,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;YACrC,OAAO,CAAC,GAAG,GAAG,SAAS,CAAC;QAC5B,CAAC;IACL,CAAC;CACJ","sourcesContent":["import { IMlpGraph, 
IMlpNeuron, IMlpSynapse } from \"../mlp.interfaces\";\r\nimport { MLPInferenceRuntime } from \"../mlp.inference\";\r\nimport { IBackpropNeuronContext, IBackpropSynapseContext, ILossFunction, IOptimizer, ITrainingContext } from \"./mlp.training.interfaces\";\r\nimport { MLPRuntimeUtils } from \"../mlp.runtime.utils\";\r\n\r\n/// <summary>\r\n/// Handles backpropagation and weight updates for an MLP graph.\r\n/// </summary>\r\nexport class MLPTrainingRuntime {\r\n private context: ITrainingContext = { iteration: 0 };\r\n\r\n constructor(\r\n public readonly graph: IMlpGraph,\r\n public readonly runtime: MLPInferenceRuntime,\r\n public readonly lossFn: ILossFunction,\r\n public readonly learningRate: number,\r\n public readonly optimizer: IOptimizer\r\n ) {}\r\n\r\n /// <summary>\r\n /// Runs a forward + backward pass and updates weights.\r\n /// </summary>\r\n public trainStep(inputs: number[], expected: number[]): number {\r\n const outputs = this.runtime.run(inputs);\r\n const loss = this._backpropagate(outputs, expected);\r\n this._applyGradients();\r\n this.context.iteration++;\r\n return loss;\r\n }\r\n\r\n /**\r\n * Performs backpropagation and stores gradients in neuron and synapse bags.\r\n * This version properly propagates gradients through all layers of the network.\r\n */\r\n private _backpropagate(outputs: number[], expected: number[]): number {\r\n let totalLoss = 0;\r\n\r\n // STEP 1 – Output layer: compute error and gradient\r\n for (let i = 0; i < this.graph.outputs.length; i++) {\r\n const neuron = this.graph.outputs[i] as IMlpNeuron;\r\n const y = expected[i];\r\n\r\n const bag = (neuron.bag ??= {}) as IBackpropNeuronContext;\r\n const output = bag.activation;\r\n\r\n const loss = this.lossFn.loss(output, y);\r\n totalLoss += loss;\r\n\r\n const dLoss = this.lossFn.dLoss(output, y); // ∂L/∂o\r\n const activationPrime = (neuron.activationFn ?? 
this.runtime.mainActivation).derivative;\r\n\r\n bag.gradient = dLoss * activationPrime(output); // ∂L/∂z\r\n bag.error = y - output;\r\n }\r\n\r\n // STEP 2 – Hidden layer: compute gradients from output layer\r\n for (let i = this.graph.hiddens.length - 1; i >= 0; i--) {\r\n const neuron = this.graph.hiddens[i] as IMlpNeuron;\r\n const bag = (neuron.bag ??= {}) as IBackpropNeuronContext;\r\n const activation = bag.activation;\r\n const activationPrime = (neuron.activationFn ?? this.runtime.mainActivation).derivative;\r\n\r\n let downstreamSum = 0;\r\n for (const syn of neuron.onsc<IMlpSynapse>() ?? []) {\r\n const to = syn.ofin as IMlpNeuron;\r\n const toBag = to.bag as IBackpropNeuronContext;\r\n downstreamSum += syn.weight * (toBag?.gradient ?? 0);\r\n }\r\n\r\n bag.gradient = activationPrime(activation) * downstreamSum;\r\n }\r\n\r\n // STEP 3 – Accumulate gradient on all synapses\r\n for (const syn of this.graph.links) {\r\n const from = syn.oini as IMlpNeuron;\r\n const to = syn.ofin as IMlpNeuron;\r\n\r\n const fromBag = from.bag as IBackpropNeuronContext;\r\n const toBag = to.bag as IBackpropNeuronContext;\r\n\r\n const synBag = (syn.bag ??= {}) as IBackpropSynapseContext;\r\n synBag.gradient = (toBag?.gradient ?? 0) * (fromBag?.activation ?? 
0);\r\n }\r\n\r\n return totalLoss;\r\n }\r\n\r\n /// <summary>\r\n /// Applies weight updates using the selected optimizer.\r\n /// </summary>\r\n private _applyGradients(): void {\r\n // Update synapse weights\r\n for (const synapse of this.graph.links) {\r\n const ctx = synapse.bag as IBackpropSynapseContext;\r\n if (ctx?.gradient !== undefined) {\r\n this.optimizer.apply(synapse, this.learningRate, ctx.gradient, this.context);\r\n }\r\n }\r\n\r\n // Update neuron biases\r\n for (const neuron of this.graph.nodes) {\r\n const ctx = neuron.bag as IBackpropNeuronContext;\r\n if (ctx?.gradient !== undefined) {\r\n neuron.bias -= this.learningRate * ctx.gradient;\r\n }\r\n }\r\n }\r\n get trainingContext(): Readonly<ITrainingContext> {\r\n return this.context;\r\n }\r\n\r\n public clearContext() {\r\n for (const neuron of this.graph.nodes) {\r\n MLPRuntimeUtils.resetBackpropContext(neuron);\r\n }\r\n for (const synapse of this.graph.links) {\r\n MLPRuntimeUtils.resetBackpropContext(synapse);\r\n }\r\n }\r\n\r\n public deleteContext() {\r\n for (const neuron of this.graph.nodes) {\r\n neuron.bag = undefined;\r\n }\r\n for (const synapse of this.graph.links) {\r\n synapse.bag = undefined;\r\n }\r\n }\r\n}\r\n"]}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
// Barrel module for the neural-network package: re-exports the ANN and SNN
// sub-packages plus the shared interfaces, builders, neuron/synapse types
// and weight initializers.
export * from "./ann";
export * from "./snn";
export * from "./nn.weights";
export * from "./nn.interfaces";
export * from "./nn.interfaces.builder";
export * from "./nn.synapse";
export * from "./nn.neuron";
export * from "./nn.builders";
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/neuralnetwork/index.ts"],"names":[],"mappings":"AAAA,cAAc,OAAO,CAAC;AACtB,cAAc,OAAO,CAAC;AACtB,cAAc,cAAc,CAAC;AAC7B,cAAc,iBAAiB,CAAC;AAChC,cAAc,yBAAyB,CAAC;AACxC,cAAc,cAAc,CAAC;AAC7B,cAAc,aAAa,CAAC;AAC5B,cAAc,eAAe,CAAC","sourcesContent":["export * from \"./ann\";\r\nexport * from \"./snn\";\r\nexport * from \"./nn.weights\";\r\nexport * from \"./nn.interfaces\";\r\nexport * from \"./nn.interfaces.builder\";\r\nexport * from \"./nn.synapse\";\r\nexport * from \"./nn.neuron\";\r\nexport * from \"./nn.builders\";\r\n"]}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import { LinkBuilder } from "../graph";
import { ILayer, ILayerConnection, INeuron, ISynapse, LayerConnectionType } from "./nn.interfaces";
import { ILayerConnectionBuilder, ISynapseBuilder } from "./nn.interfaces.builder";
import { IWeightInitializer } from "./nn.weights";
/**
 * Fluent builder for synapses: a LinkBuilder that pre-selects the Synapse
 * type and stamps a configurable weight onto the built link.
 */
export declare class SynapseBuilder extends LinkBuilder implements ISynapseBuilder {
    /** Weight assigned to the built synapse (defaults to 0.0). */
    private _weight;
    constructor();
    /** Sets the weight for the next build(); returns this for chaining. */
    withWeight(weight: number): ISynapseBuilder;
    /** Builds the link via the base builder, then applies the weight. */
    build(...args: any[]): ISynapse;
}
/**
 * Builds the set of synapses connecting two layers according to a
 * LayerConnectionType (fully connected by default).
 */
export declare class LayerConnectionBuilder implements ILayerConnectionBuilder {
    /** Connection topology (defaults to LayerConnectionType.FullyConnected). */
    private _type;
    /** Weight initializer used for new synapses (defaults to Uniform). */
    private _weightInitializer;
    /** Builder used to create each individual synapse. */
    private _synapseBuilder;
    withType(type: LayerConnectionType): ILayerConnectionBuilder;
    withWeightInitializer(initializer: IWeightInitializer): ILayerConnectionBuilder;
    withSynapseBuilder(builder: SynapseBuilder): ILayerConnectionBuilder;
    /**
     * Builds the connection between source and target layers; returns
     * undefined when the configured connection type is not recognized.
     */
    build(source: ILayer<INeuron>, target: ILayer<INeuron>): ILayerConnection<ISynapse> | undefined;
    /** One synapse per (source, target) neuron pair. */
    private buildFullConnection;
    private buildOneToOneConnection;
}
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import { LinkBuilder } from "../graph";
|
|
2
|
+
import { LayerConnectionType } from "./nn.interfaces";
|
|
3
|
+
import { Synapse } from "./nn.synapse";
|
|
4
|
+
import { Uniform } from "./nn.weights";
|
|
5
|
+
/**
 * Fluent builder for Synapse links. Extends the generic LinkBuilder and
 * adds a configurable weight that is stamped onto every built link.
 */
export class SynapseBuilder extends LinkBuilder {
    constructor() {
        super();
        this._weight = 0.0; // default weight until withWeight() is called
        this.withType(Synapse); // built links are Synapse instances
    }
    /** Sets the weight applied by the next build(); returns this for chaining. */
    withWeight(weight) {
        this._weight = weight;
        return this;
    }
    /** Builds the link via the base class, then applies the configured weight. */
    build(...args) {
        const link = super.build(...args);
        link.weight = this._weight;
        return link;
    }
}
|
|
21
|
+
/**
 * Builds the set of synapses connecting two layers according to the
 * configured LayerConnectionType, drawing each weight from the
 * configured weight initializer.
 */
export class LayerConnectionBuilder {
    constructor() {
        this._type = LayerConnectionType.FullyConnected; // default topology
        this._weightInitializer = new Uniform(); // default weight source
        this._synapseBuilder = new SynapseBuilder(); // default link factory
    }
    /** Selects the connection topology; returns this for chaining. */
    withType(type) {
        this._type = type;
        return this;
    }
    /** Replaces the weight initializer; returns this for chaining. */
    withWeightInitializer(initializer) {
        this._weightInitializer = initializer;
        return this;
    }
    /** Replaces the synapse factory; returns this for chaining. */
    withSynapseBuilder(builder) {
        this._synapseBuilder = builder;
        return this;
    }
    /**
     * Creates the synapses between source and target.
     * Returns undefined for an unrecognized connection type.
     */
    build(source, target) {
        if (this._type === LayerConnectionType.FullyConnected) {
            return this.buildFullConnection(source, target);
        }
        if (this._type === LayerConnectionType.OneToOne) {
            return this.buildOneToOneConnection(source, target);
        }
        return undefined;
    }
    /** One synapse for every (source, target) neuron pair. */
    buildFullConnection(source, target) {
        const connection = [];
        for (const from of source) {
            for (const to of target) {
                connection.push(this.createSynapse(from, to));
            }
        }
        return connection;
    }
    /** One synapse per index; both layers must have the same size. */
    buildOneToOneConnection(source, target) {
        if (source.length !== target.length) {
            throw new Error("Source and target layers must have the same number of neurons for one-to-one connection.");
        }
        const connection = [];
        for (let index = 0; index < source.length; index++) {
            connection.push(this.createSynapse(source[index], target[index]));
        }
        return connection;
    }
    /** Builds a single synapse from one neuron to another with a fresh random weight. */
    createSynapse(from, to) {
        return this._synapseBuilder
            .withWeight(this._weightInitializer.next())
            .withFrom(from)
            .withTo(to)
            .build();
    }
}
|
|
71
|
+
//# sourceMappingURL=nn.builders.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"nn.builders.js","sourceRoot":"","sources":["../../src/neuralnetwork/nn.builders.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AACvC,OAAO,EAA+C,mBAAmB,EAAE,MAAM,iBAAiB,CAAC;AAEnG,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAsB,OAAO,EAAE,MAAM,cAAc,CAAC;AAE3D,MAAM,OAAO,cAAe,SAAQ,WAAW;IAG3C;QACI,KAAK,EAAE,CAAC;QAHJ,YAAO,GAAW,GAAG,CAAC,CAAC,iBAAiB;QAI5C,IAAI,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAC;IAC3B,CAAC;IAEM,UAAU,CAAC,MAAc;QAC5B,IAAI,CAAC,OAAO,GAAG,MAAM,CAAC;QACtB,OAAO,IAAI,CAAC;IAChB,CAAC;IAEM,KAAK,CAAC,GAAG,IAAW;QACvB,MAAM,OAAO,GAAG,KAAK,CAAC,KAAK,CAAC,GAAG,IAAI,CAAa,CAAC;QACjD,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,OAAO,CAAC;QAC9B,OAAO,OAAO,CAAC;IACnB,CAAC;CACJ;AAED,MAAM,OAAO,sBAAsB;IAAnC;QACY,UAAK,GAAwB,mBAAmB,CAAC,cAAc,CAAC,CAAC,0BAA0B;QAC3F,uBAAkB,GAAuB,IAAI,OAAO,EAAE,CAAC,CAAC,6BAA6B;QACrF,oBAAe,GAAmB,IAAI,cAAc,EAAE,CAAC,CAAC,0BAA0B;IAkD9F,CAAC;IAhDU,QAAQ,CAAC,IAAyB;QACrC,IAAI,CAAC,KAAK,GAAG,IAAI,CAAC;QAClB,OAAO,IAAI,CAAC;IAChB,CAAC;IAEM,qBAAqB,CAAC,WAA+B;QACxD,IAAI,CAAC,kBAAkB,GAAG,WAAW,CAAC;QACtC,OAAO,IAAI,CAAC;IAChB,CAAC;IAEM,kBAAkB,CAAC,OAAuB;QAC7C,IAAI,CAAC,eAAe,GAAG,OAAO,CAAC;QAC/B,OAAO,IAAI,CAAC;IAChB,CAAC;IAED,KAAK,CAAC,MAAuB,EAAE,MAAuB;QAClD,QAAQ,IAAI,CAAC,KAAK,EAAE,CAAC;YACjB,KAAK,mBAAmB,CAAC,cAAc;gBACnC,OAAO,IAAI,CAAC,mBAAmB,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;YACpD,KAAK,mBAAmB,CAAC,QAAQ;gBAC7B,OAAO,IAAI,CAAC,uBAAuB,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;YACxD;gBACI,OAAO,SAAS,CAAC;QACzB,CAAC;IACL,CAAC;IAEO,mBAAmB,CAAC,MAAuB,EAAE,MAAuB;QACxE,MAAM,QAAQ,GAAoB,EAAE,CAAC;QACrC,KAAK,MAAM,YAAY,IAAI,MAAM,EAAE,CAAC;YAChC,KAAK,MAAM,YAAY,IAAI,MAAM,EAAE,CAAC;gBAChC,MAAM,OAAO,GAAG,IAAI,CAAC,eAAe,CAAC,UAAU,CAAC,IAAI,CAAC,kBAAkB,CAAC,IAAI,EAAE,CAAC,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC,KAAK,EAAc,CAAC;gBAChJ,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YAC3B,CAAC;QACL,CAAC;QACD,OAAO,QAAQ,CAAC;IACpB,CAAC;IAEO,uBAAuB,CAAC,MAAuB,EAAE,MAAuB;QAC5E,IAAI,MAAM,CAAC,MAAM,KAAK,MAAM,CAAC,MAAM,EAAE,CAAC;YAClC,MAAM,IAAI,KAAK,CAAC,0FAA0F,CAAC,CAAC;QAChH,CAAC
;QACD,MAAM,QAAQ,GAA+B,EAAE,CAAC;QAChD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;YACrC,MAAM,OAAO,GAAG,IAAI,CAAC,eAAe,CAAC,UAAU,CAAC,IAAI,CAAC,kBAAkB,CAAC,IAAI,EAAE,CAAC,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,EAAc,CAAC;YAC1I,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;QAC3B,CAAC;QACD,OAAO,QAAQ,CAAC;IACpB,CAAC;CACJ","sourcesContent":["import { LinkBuilder } from \"../graph\";\r\nimport { ILayer, ILayerConnection, INeuron, ISynapse, LayerConnectionType } from \"./nn.interfaces\";\r\nimport { ILayerConnectionBuilder, ISynapseBuilder } from \"./nn.interfaces.builder\";\r\nimport { Synapse } from \"./nn.synapse\";\r\nimport { IWeightInitializer, Uniform } from \"./nn.weights\";\r\n\r\nexport class SynapseBuilder extends LinkBuilder implements ISynapseBuilder {\r\n private _weight: number = 0.0; // Default weight\r\n\r\n public constructor() {\r\n super();\r\n this.withType(Synapse);\r\n }\r\n\r\n public withWeight(weight: number): ISynapseBuilder {\r\n this._weight = weight;\r\n return this;\r\n }\r\n\r\n public build(...args: any[]): ISynapse {\r\n const synapse = super.build(...args) as ISynapse;\r\n synapse.weight = this._weight;\r\n return synapse;\r\n }\r\n}\r\n\r\nexport class LayerConnectionBuilder implements ILayerConnectionBuilder {\r\n private _type: LayerConnectionType = LayerConnectionType.FullyConnected; // Default connection type\r\n private _weightInitializer: IWeightInitializer = new Uniform(); // Default weight initializer\r\n private _synapseBuilder: SynapseBuilder = new SynapseBuilder(); // Default synapse builder\r\n\r\n public withType(type: LayerConnectionType): ILayerConnectionBuilder {\r\n this._type = type;\r\n return this;\r\n }\r\n\r\n public withWeightInitializer(initializer: IWeightInitializer): ILayerConnectionBuilder {\r\n this._weightInitializer = initializer;\r\n return this;\r\n }\r\n\r\n public withSynapseBuilder(builder: SynapseBuilder): 
ILayerConnectionBuilder {\r\n this._synapseBuilder = builder;\r\n return this;\r\n }\r\n\r\n build(source: ILayer<INeuron>, target: ILayer<INeuron>): ILayerConnection<ISynapse> | undefined {\r\n switch (this._type) {\r\n case LayerConnectionType.FullyConnected:\r\n return this.buildFullConnection(source, target);\r\n case LayerConnectionType.OneToOne:\r\n return this.buildOneToOneConnection(source, target);\r\n default:\r\n return undefined;\r\n }\r\n }\r\n\r\n private buildFullConnection(source: ILayer<INeuron>, target: ILayer<INeuron>): ILayerConnection<ISynapse> {\r\n const synapses: Array<ISynapse> = [];\r\n for (const sourceNeuron of source) {\r\n for (const targetNeuron of target) {\r\n const synapse = this._synapseBuilder.withWeight(this._weightInitializer.next()).withFrom(sourceNeuron).withTo(targetNeuron).build() as ISynapse;\r\n synapses.push(synapse);\r\n }\r\n }\r\n return synapses;\r\n }\r\n\r\n private buildOneToOneConnection(source: ILayer<INeuron>, target: ILayer<INeuron>): ILayerConnection<ISynapse> | undefined {\r\n if (source.length !== target.length) {\r\n throw new Error(\"Source and target layers must have the same number of neurons for one-to-one connection.\");\r\n }\r\n const synapses: ILayerConnection<ISynapse> = [];\r\n for (let i = 0; i < source.length; i++) {\r\n const synapse = this._synapseBuilder.withWeight(this._weightInitializer.next()).withFrom(source[i]).withTo(target[i]).build() as ISynapse;\r\n synapses.push(synapse);\r\n }\r\n return synapses;\r\n }\r\n}\r\n"]}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import { ILinkBuilder } from "../graph";
|
|
2
|
+
import { SynapseBuilder } from "./nn.builders";
|
|
3
|
+
import { ILayer, ILayerConnection, INeuron, ISynapse, LayerConnectionType } from "./nn.interfaces";
|
|
4
|
+
import { IWeightInitializer } from "./nn.weights";
|
|
5
|
+
/** Builder contract for synapses: a LinkBuilder that can also set a weight. */
export interface ISynapseBuilder extends ILinkBuilder {
    /** Sets the weight of the next built synapse; returns the builder for chaining. */
    withWeight(weight: number): ISynapseBuilder;
}
/** Constructor signature for ISynapseBuilder implementations. */
export interface ISynapseBuilderConstructor<T extends ISynapseBuilder> {
    new (weight?: number): T;
}
/** Builder contract for creating the synapses that connect two layers. */
export interface ILayerConnectionBuilder {
    /** Selects the connection topology. */
    withType(type: LayerConnectionType): ILayerConnectionBuilder;
    /** Selects the source of initial weights. */
    withWeightInitializer(initializer: IWeightInitializer): ILayerConnectionBuilder;
    /** Selects the factory used to build individual synapses. */
    withSynapseBuilder(builder: SynapseBuilder): ILayerConnectionBuilder;
    /** Creates the synapses between source and target; may return undefined. */
    build(source: ILayer<INeuron>, target: ILayer<INeuron>): ILayerConnection<ISynapse> | undefined;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"nn.interfaces.builder.js","sourceRoot":"","sources":["../../src/neuralnetwork/nn.interfaces.builder.ts"],"names":[],"mappings":"","sourcesContent":["import { ILinkBuilder } from \"../graph\";\r\nimport { SynapseBuilder } from \"./nn.builders\";\r\nimport { ILayer, ILayerConnection, INeuron, ISynapse, LayerConnectionType } from \"./nn.interfaces\";\r\nimport { IWeightInitializer } from \"./nn.weights\";\r\n\r\nexport interface ISynapseBuilder extends ILinkBuilder {\r\n withWeight(weight: number): ISynapseBuilder;\r\n}\r\n\r\nexport interface ISynapseBuilderConstructor<T extends ISynapseBuilder> {\r\n new (weight?: number): T;\r\n}\r\n\r\nexport interface ILayerConnectionBuilder {\r\n withType(type: LayerConnectionType): ILayerConnectionBuilder;\r\n withWeightInitializer(initializer: IWeightInitializer): ILayerConnectionBuilder;\r\n withSynapseBuilder(builder: SynapseBuilder): ILayerConnectionBuilder;\r\n build(source: ILayer<INeuron>, target: ILayer<INeuron>): ILayerConnection<ISynapse> | undefined;\r\n}\r\n"]}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import { ILinkSet, INode, INodeSet, IOlink } from "../graph";
|
|
2
|
+
/** A generic neuron (graph node) in a neural network. */
export interface INeuron extends INode {
    /** Resets the neuron to its initial state. */
    reset(): void;
}
/** A layer: a set of neurons. */
export interface ILayer<N extends INeuron> extends INodeSet<N> {
}
/** A layer connection: the set of synapses linking two layers. */
export interface ILayerConnection<L extends ISynapse> extends ILinkSet<L> {
}
/** Describes how two neural layers are connected. */
export declare enum LayerConnectionType {
    /** Every source neuron connects to every target neuron (standard MLP connectivity). */
    FullyConnected = "fully_connected",
    /** Each source neuron connects to the same-index target neuron; requires equally sized layers. */
    OneToOne = "one_to_one",
    Unknown = "unknown"
}
/** A synapse (directed link) between two neurons, carrying a weight. */
export interface ISynapse extends IOlink {
    /** Synaptic weight. */
    weight: number;
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/// <summary>
|
|
2
|
+
/// Describes how two neural layers are connected.
|
|
3
|
+
/// </summary>
|
|
4
|
+
/**
 * Describes how two neural layers are connected.
 * Compiled-enum pattern: the IIFE populates the exported object with
 * string-valued members.
 */
export var LayerConnectionType;
(function (connection) {
    /**
     * Every neuron in the source layer is connected to every neuron in
     * the target layer — the standard connection in most MLP architectures.
     */
    connection["FullyConnected"] = "fully_connected";
    /**
     * Each source neuron is connected to the target neuron with the same
     * index; requires both layers to have the same number of neurons.
     */
    connection["OneToOne"] = "one_to_one";
    connection["Unknown"] = "unknown";
})(LayerConnectionType || (LayerConnectionType = {}));
|
|
18
|
+
//# sourceMappingURL=nn.interfaces.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"nn.interfaces.js","sourceRoot":"","sources":["../../src/neuralnetwork/nn.interfaces.ts"],"names":[],"mappings":"AAoBA,aAAa;AACb,kDAAkD;AAClD,cAAc;AACd,MAAM,CAAN,IAAY,mBAcX;AAdD,WAAY,mBAAmB;IAC3B,aAAa;IACb,sFAAsF;IACtF,8DAA8D;IAC9D,cAAc;IACd,yDAAkC,CAAA;IAElC,aAAa;IACb,uGAAuG;IACvG,yEAAyE;IACzE,cAAc;IACd,8CAAuB,CAAA;IAEvB,0CAAmB,CAAA;AACvB,CAAC,EAdW,mBAAmB,KAAnB,mBAAmB,QAc9B","sourcesContent":["import { ILinkSet, INode, INodeSet, IOlink } from \"../graph\";\r\n\r\n\r\n\r\n/// <summary>\r\n/// Represents a generic neuron (node) in a neural network.\r\n/// This interface is extended by specific neuron types (e.g., spiking, activation-based).\r\n/// </summary>\r\nexport interface INeuron extends INode {\r\n /// <summary>Resets the neuron to its initial state</summary>\r\n reset(): void;\r\n}\r\n\r\n/// <summary>\r\n/// Represents a Layer in a neural network.\r\n/// </summary>\r\nexport interface ILayer<N extends INeuron> extends INodeSet<N> {}\r\n\r\nexport interface ILayerConnection<L extends ISynapse> extends ILinkSet<L> {}\r\n\r\n/// <summary>\r\n/// Describes how two neural layers are connected.\r\n/// </summary>\r\nexport enum LayerConnectionType {\r\n /// <summary>\r\n /// Every neuron in the source layer is connected to every neuron in the target layer.\r\n /// This is the standard connection in most MLP architectures.\r\n /// </summary>\r\n FullyConnected = \"fully_connected\",\r\n\r\n /// <summary>\r\n /// Each neuron in the source layer is connected to the neuron with the same index in the target layer.\r\n /// Requires source and target layers to have the same number of neurons.\r\n /// </summary>\r\n OneToOne = \"one_to_one\",\r\n\r\n Unknown = \"unknown\",\r\n}\r\n\r\n/// <summary>\r\n/// Represents a synapse (link) between two neurons in a network.\r\n/// This interface is extended by specific synapse types (e.g., with weights, delays, STDP).\r\n/// </summary>\r\nexport interface ISynapse extends IOlink {\r\n /// <summary>Synaptic 
weight</summary>\r\n weight: number;\r\n}\r\n"]}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import { ICartesian } from "../geometry";
|
|
2
|
+
import { GraphNode, IOlink } from "../graph";
|
|
3
|
+
import { Nullable } from "../types";
|
|
4
|
+
import { INeuron } from "./nn.interfaces";
|
|
5
|
+
/** Concrete neuron backed by a GraphNode. */
export declare class Neuron extends GraphNode implements INeuron {
    /**
     * @param onsc optional link collection forwarded to GraphNode — presumably incoming links; verify against GraphNode
     * @param opsc optional link collection forwarded to GraphNode — presumably outgoing links; verify against GraphNode
     * @param position optional cartesian position of the neuron
     */
    constructor(onsc?: Nullable<IOlink[]>, opsc?: Nullable<IOlink[]>, position?: ICartesian);
    /** Resets the neuron to its initial state (the implementation clears the node's `bag`). */
    reset(): void;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"nn.neuron.js","sourceRoot":"","sources":["../../src/neuralnetwork/nn.neuron.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,SAAS,EAAU,MAAM,UAAU,CAAC;AAI7C,MAAM,OAAO,MAAO,SAAQ,SAAS;IAEjC,YAAY,OAA2B,IAAI,EAAE,OAA2B,IAAI,EAAE,QAAqB;QAC/F,KAAK,CAAC,IAAI,EAAC,IAAI,EAAC,QAAQ,CAAC,CAAC;IAC9B,CAAC;IAED,KAAK;QACD,IAAI,CAAC,GAAG,GAAG,SAAS,CAAC;IACzB,CAAC;CACJ","sourcesContent":["import { ICartesian } from \"../geometry\";\r\nimport { GraphNode, IOlink } from \"../graph\";\r\nimport { Nullable } from \"../types\";\r\nimport { INeuron } from \"./nn.interfaces\";\r\n\r\nexport class Neuron extends GraphNode implements INeuron {\r\n\r\n constructor(onsc: Nullable<IOlink[]> = null, opsc: Nullable<IOlink[]> = null, position?: ICartesian){\r\n super(onsc,opsc,position);\r\n }\r\n\r\n reset(): void {\r\n this.bag = undefined;\r\n }\r\n}\r\n"]}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import { __decorate, __metadata } from "tslib";
|
|
2
|
+
import { cloneable, GraphOLink } from "../graph";
|
|
3
|
+
// Weighted directed link (synapse) between two neurons.
// Compiled TypeScript output: the @cloneable decorator on `weight` is
// applied below via tslib's __decorate helper.
export class Synapse extends GraphOLink {
    // oini/ofin: the two endpoint nodes, forwarded to GraphOLink.
    // weight: initial synaptic weight (defaults to 0).
    constructor(oini, ofin, weight = 0) {
        super(oini, ofin);
        this.weight = 0; // compiled field initializer for `weight`
        this.weight = weight;
    }
}
// Registers the `cloneable` decorator on Synapse.prototype.weight and
// records its design-time type (Number) for reflection metadata.
__decorate([
    cloneable,
    __metadata("design:type", Number)
], Synapse.prototype, "weight", void 0);
|
|
14
|
+
//# sourceMappingURL=nn.synapse.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"nn.synapse.js","sourceRoot":"","sources":["../../src/neuralnetwork/nn.synapse.ts"],"names":[],"mappings":";AAAA,OAAO,EAAE,SAAS,EAAE,UAAU,EAAS,MAAM,UAAU,CAAC;AAGxD,MAAM,OAAO,OAAQ,SAAQ,UAAU;IAEnC,YAAmB,IAAW,EAAE,IAAW,EAAE,SAAiB,CAAC;QAC3D,KAAK,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;QAFJ,WAAM,GAAW,CAAC,CAAC;QAGjC,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;IACzB,CAAC;CACJ;AALqB;IAAjB,SAAS;;uCAA2B","sourcesContent":["import { cloneable, GraphOLink, INode } from \"../graph\";\r\nimport { ISynapse } from \"./nn.interfaces\";\r\n\r\nexport class Synapse extends GraphOLink implements ISynapse {\r\n @cloneable public weight: number = 0;\r\n public constructor(oini: INode, ofin: INode, weight: number = 0) {\r\n super(oini, ofin);\r\n this.weight = weight;\r\n }\r\n}\r\n"]}
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
/**
 * Static, stateless weight-initialization strategies for neural networks.
 * Each call draws a single random weight.
 */
export declare class WeightInit {
    /** Glorot/Xavier draw: uniform in [-limit, limit] with limit = sqrt(6 / (fanIn + fanOut)). */
    static Glorot(fanIn: number, fanOut: number): number;
    /** He draw: uniform in [-limit, limit] with limit = sqrt(6 / fanIn). */
    static He(fanIn: number): number;
    /** Gaussian draw via the Box-Muller transform. */
    static Normal(mean?: number, stdDev?: number): number;
    /** Uniform draw in [min, max). */
    static Uniform(min?: number, max?: number): number;
}
/** Identifies a weight-initialization strategy. */
export declare enum WeightInitializerType {
    Glorot = "glorot",
    He = "he",
    Normal = "normal",
    Uniform = "uniform"
}
/** Stateful initializer object usable in builder chains. */
export interface IWeightInitializer {
    /** Which strategy this initializer implements. */
    type: WeightInitializerType;
    /** Draws the next random weight. */
    next(): number;
}
/** Glorot/Xavier initializer; the draw limit is precomputed from fan-in and fan-out. */
export declare class Glorot implements IWeightInitializer {
    private limit;
    constructor(fanIn: number, fanOut: number);
    get type(): WeightInitializerType;
    next(): number;
}
/** He initializer; the draw limit is precomputed from fan-in. */
export declare class He implements IWeightInitializer {
    private limit;
    constructor(fanIn: number);
    get type(): WeightInitializerType;
    next(): number;
}
/** Normal (Gaussian) initializer using the Box-Muller transform. */
export declare class Normal implements IWeightInitializer {
    private mean;
    private stdDev;
    constructor(mean?: number, stdDev?: number);
    get type(): WeightInitializerType;
    next(): number;
}
/** Uniform initializer drawing from [min, max). */
export declare class Uniform implements IWeightInitializer {
    private min;
    private max;
    constructor(min?: number, max?: number);
    get type(): WeightInitializerType;
    next(): number;
}
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
/// <summary>
|
|
2
|
+
/// WeightInit provides commonly used weight initialization strategies for neural networks.
|
|
3
|
+
/// These initializers are not specific to MLPs and are widely used in various types of neural architectures
|
|
4
|
+
/// including CNNs, RNNs, Transformers, and others, especially in deep learning.
|
|
5
|
+
/// </summary>
|
|
6
|
+
/**
 * Collection of static, stateless weight-initialization strategies for
 * neural networks. Not specific to MLPs: applicable to CNNs, RNNs,
 * Transformers and other architectures. Each call draws one fresh
 * random weight.
 */
export class WeightInit {
    /**
     * Glorot/Xavier initialization, commonly paired with tanh or sigmoid
     * activations. Draws uniformly from [-limit, limit] with
     * limit = sqrt(6 / (fanIn + fanOut)), keeping variance roughly equal
     * across layers.
     */
    static Glorot(fanIn, fanOut) {
        const bound = Math.sqrt(6 / (fanIn + fanOut));
        return (Math.random() * 2 - 1) * bound;
    }
    /**
     * He initialization, commonly paired with ReLU-like activations.
     * Draws uniformly from [-limit, limit] with limit = sqrt(6 / fanIn),
     * which helps prevent vanishing gradients in deep networks.
     */
    static He(fanIn) {
        const bound = Math.sqrt(6 / fanIn);
        return (Math.random() * 2 - 1) * bound;
    }
    /**
     * Gaussian sample via the Box-Muller transform.
     * `1 - Math.random()` keeps both arguments in (0, 1] so Math.log
     * never receives 0.
     */
    static Normal(mean = 0, stdDev = 1) {
        const a = 1 - Math.random();
        const b = 1 - Math.random();
        const standard = Math.sqrt(-2.0 * Math.log(a)) * Math.cos(2.0 * Math.PI * b);
        return mean + stdDev * standard;
    }
    /** Uniform sample in [min, max). */
    static Uniform(min = -1, max = 1) {
        return min + Math.random() * (max - min);
    }
}
|
|
41
|
+
/**
 * Identifies a weight-initialization strategy.
 * Compiled-enum pattern: the IIFE populates the exported object with
 * string-valued members.
 */
export var WeightInitializerType;
(function (kind) {
    /** Glorot/Xavier initializer, typically used with tanh or sigmoid activations. */
    kind["Glorot"] = "glorot";
    /** He initializer, typically used with ReLU-like activations. */
    kind["He"] = "he";
    /** Normal (Gaussian) initializer using the Box-Muller transform. */
    kind["Normal"] = "normal";
    /** Uniform initializer between min and max values. */
    kind["Uniform"] = "uniform";
})(WeightInitializerType || (WeightInitializerType = {}));
/**
 * Glorot/Xavier initializer object for use in builder chains.
 * Precomputes limit = sqrt(6 / (fanIn + fanOut)) once at construction.
 */
export class Glorot {
    constructor(fanIn, fanOut) {
        this.limit = Math.sqrt(6 / (fanIn + fanOut));
    }
    get type() {
        return WeightInitializerType.Glorot;
    }
    /** Draws uniformly from [-limit, limit]. */
    next() {
        const centered = Math.random() * 2 - 1;
        return centered * this.limit;
    }
}
/**
 * He initializer object, typically used with ReLU-like activations.
 * Precomputes limit = sqrt(6 / fanIn) once at construction.
 */
export class He {
    constructor(fanIn) {
        this.limit = Math.sqrt(6 / fanIn);
    }
    get type() {
        return WeightInitializerType.He;
    }
    /** Draws uniformly from [-limit, limit]. */
    next() {
        const centered = Math.random() * 2 - 1;
        return centered * this.limit;
    }
}
/** Normal (Gaussian) initializer object using the Box-Muller transform. */
export class Normal {
    constructor(mean = 0, stdDev = 1) {
        this.mean = mean;
        this.stdDev = stdDev;
    }
    get type() {
        return WeightInitializerType.Normal;
    }
    /** Draws mean + stdDev * z where z is a standard-normal sample. */
    next() {
        const a = 1 - Math.random();
        const b = 1 - Math.random();
        const z = Math.sqrt(-2.0 * Math.log(a)) * Math.cos(2.0 * Math.PI * b);
        return this.mean + this.stdDev * z;
    }
}
/** Uniform initializer object drawing from [min, max). */
export class Uniform {
    constructor(min = -1, max = 1) {
        this.min = min;
        this.max = max;
    }
    get type() {
        return WeightInitializerType.Uniform;
    }
    next() {
        return this.min + Math.random() * (this.max - this.min);
    }
}
|
|
120
|
+
//# sourceMappingURL=nn.weights.js.map
|