NeuralNetworks 0.2.5__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. NeuralNetworks/MLP/MLP.py +64 -0
  2. NeuralNetworks/MLP/_MLP_tools.py +74 -0
  3. NeuralNetworks/MLP/__init__.py +2 -93
  4. NeuralNetworks/Trainer/Trainer.py +98 -0
  5. NeuralNetworks/Trainer/_Trainer_tools.py +155 -0
  6. NeuralNetworks/Trainer/__init__.py +2 -45
  7. NeuralNetworks/VAE/VAE.py +114 -0
  8. NeuralNetworks/VAE/_VAE_tools.py +6 -0
  9. NeuralNetworks/VAE/__init__.py +8 -0
  10. NeuralNetworks/{Dependances/matplot.py → _Dependances/__init__.py} +12 -12
  11. NeuralNetworks/_Dependances/pytorch.py +125 -0
  12. NeuralNetworks/_Dependances/tools.py +15 -0
  13. NeuralNetworks/_UI/Learnings.py +31 -0
  14. NeuralNetworks/_UI/Losses.py +48 -0
  15. NeuralNetworks/{UI → _UI}/__init__.py +1 -1
  16. NeuralNetworks/_UI/_plot.py +50 -0
  17. NeuralNetworks/__init__.py +6 -7
  18. NeuralNetworks/_shared/__init__.py +8 -0
  19. NeuralNetworks/_shared/module.py +115 -0
  20. {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/METADATA +8 -15
  21. neuralnetworks-0.2.8.dist-info/RECORD +24 -0
  22. {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/WHEEL +1 -1
  23. NeuralNetworks/Dependances/__init__.py +0 -75
  24. NeuralNetworks/Dependances/pytorch.py +0 -111
  25. NeuralNetworks/MLP/FourierFeatures.py +0 -89
  26. NeuralNetworks/MLP/Layers.py +0 -31
  27. NeuralNetworks/MLP/inference.py +0 -26
  28. NeuralNetworks/Trainer/dynamic_learning_rate.py +0 -79
  29. NeuralNetworks/Trainer/sample_data.py +0 -19
  30. NeuralNetworks/Trainer/train.py +0 -75
  31. NeuralNetworks/UI/Learnings.py +0 -45
  32. NeuralNetworks/UI/Losses.py +0 -45
  33. neuralnetworks-0.2.5.dist-info/RECORD +0 -20
  34. {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/licenses/LICENSE +0 -0
  35. {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/top_level.txt +0 -0
NeuralNetworks/Dependances/pytorch.py
@@ -1,111 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- import os
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
-
- import platform
-
- import torch
- import torch.optim as optim
- import torch.nn as nn
- import torch.quantization as tq
- from torch.amp import autocast, GradScaler
- from torch.utils.data import TensorDataset, DataLoader
-
- from torchmetrics.image import PeakSignalNoiseRatio as PSNR
- from torchvision.transforms import ToTensor, Resize, Compose
-
- torch.cuda.empty_cache()
- def get_best_device():
-
-     os_name = platform.system().lower()
-
-     # =========== APPLE SILICON (macOS) ===========
-     if os_name == "darwin":
-         if torch.backends.mps.is_available():
-             return torch.device("mps")
-
-     # =========== WINDOWS ===========
-     if os_name == "windows":
-         # 1) CUDA
-         if torch.cuda.is_available():
-             return torch.device("cuda")
-
-     # =========== LINUX ===========
-     if os_name == "linux":
-         # 1) CUDA (Nvidia)
-         if torch.cuda.is_available():
-             return torch.device("cuda")
-         # 2) ROCm (AMD)
-         elif hasattr(torch.backends, "hip") and torch.backends.hip.is_available():
-             return torch.device("cuda")
-
-         # 3) Intel oneAPI / XPU
-         elif hasattr(torch, "xpu") and torch.xpu.is_available():
-             return torch.device("xpu")
-
-     # =========== Unknown OS ===========
-     return torch.device("cpu")
- device = get_best_device()
-
- # --- CUDA optimisations ---
- # Speed up convolutions and matmul
- torch.backends.cudnn.benchmark = True
- torch.backends.cudnn.enabled = True
- torch.backends.cuda.matmul.allow_tf32 = True
- torch.backends.cudnn.allow_tf32 = True
-
- # Autograd settings
- torch.autograd.set_detect_anomaly(False)
- torch.autograd.profiler.profile(enabled=False)
- torch.use_deterministic_algorithms(False)
-
- torch._inductor.config.max_autotune = "max"
-
- norm_list = {
-     "Relu": nn.ReLU(),
-     "LeakyRelu": nn.LeakyReLU(),
-     "ELU": nn.ELU(),
-     "SELU": nn.SELU(),
-     "GELU": nn.GELU(),
-     "Mish": nn.Mish(),
-     "Sigmoid": nn.Sigmoid(),
-     "Tanh": nn.Tanh(),
-     "Hardtanh": nn.Hardtanh(),
-     "Softplus": nn.Softplus(),
-     "Softsign": nn.Softsign()
- }
-
- crit_list = {
-     "MSE": nn.MSELoss(),
-     "L1": nn.L1Loss(),
-     "SmoothL1": nn.SmoothL1Loss(),
-     "Huber": nn.HuberLoss(),
-     "CrossEntropy": nn.CrossEntropyLoss(),
-     "KLDiv": nn.KLDivLoss(),
-     "PoissonNLL": nn.PoissonNLLLoss(),
-     "MultiLabelSoftMargin": nn.MultiLabelSoftMarginLoss()
- }
-
- def optim_list(params):
-     return {
-         "Adadelta": optim.Adadelta(params),
-         "Adafactor": optim.Adafactor(params),
-         "Adam": optim.Adam(params),
-         "AdamW": optim.AdamW(params),
-         "Adamax": optim.Adamax(params),
-         "ASGD": optim.ASGD(params),
-         "NAdam": optim.NAdam(params),
-         "RAdam": optim.RAdam(params),
-         "RMSprop": optim.RMSprop(params),
-         "Rprop": optim.Rprop(params),
-         "SGD": optim.SGD(params)
-     }
-
- def tensorise(obj):
-     return torch.as_tensor(obj, dtype=torch.float32, device='cpu')
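For orientation, here is a minimal usage sketch of the helpers this file provided (not part of the package diff; the model and arguments are hypothetical). It collapses the same MPS, then CUDA, then CPU fallback into one function and looks an activation and a loss up by name, as the norm_list and crit_list registries do. Note that the shipped optim_list instantiates every optimizer eagerly, so callers keep a single entry of the returned dict.

import torch
import torch.nn as nn

def pick_device():
    # Same fallback order as get_best_device, collapsed across OSes:
    # Apple MPS first, then CUDA (NVIDIA, or ROCm reporting as "cuda"), else CPU.
    if torch.backends.mps.is_available():
        return torch.device("mps")
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")

device = pick_device()
act = nn.GELU()          # equivalent to norm_list["GELU"]
crit = nn.MSELoss()      # equivalent to crit_list["MSE"]
model = nn.Sequential(nn.Linear(4, 8), act, nn.Linear(8, 1)).to(device)
x = torch.randn(2, 4, device=device)
loss = crit(model(x), torch.zeros(2, 1, device=device))
opt = torch.optim.Adam(model.parameters())   # one entry of optim_list(params)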
NeuralNetworks/MLP/FourierFeatures.py
@@ -1,89 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- from ..Dependances import torch, nn, np
-
- class FourierEncoding (nn.Module):
-     """
-     Random Fourier encoding to enrich the input representation.
-
-     This layer projects the inputs into frequency space using a learned
-     projection matrix, then applies sine and cosine functions to capture
-     high-frequency variations.
-
-     Parameters
-     ----------
-     nb_fourier : int
-         Number of Fourier components.
-     input_size : int
-         Input dimension.
-     sigma : float
-         Standard deviation used to initialise the projection matrix.
-     """
-     def __init__ (self, nb_fourier, input_size, sigma):
-         super ().__init__ ()
-         self.B = nn.Parameter (torch.randn (nb_fourier, input_size) * sigma)
-
-     def forward (self, x):
-         """
-         Applies the Fourier encoding to the inputs.
-
-         Parameters
-         ----------
-         x : torch.Tensor
-             Input tensor of shape `(N, input_size)`.
-
-         Returns
-         -------
-         torch.Tensor
-             Encoded tensor of shape `(N, 2 * nb_fourier)`: the
-             concatenation of the cosines and sines.
-         """
-         vp = 2 * np.pi * x @ self.B.T
-         return torch.cat ((torch.cos (vp), torch.sin (vp)), dim = -1)
-
- def encode (input_size, output_size, sigmas, fourier_input_size, nb_fourier):
-     """
-     Builds the encoding modules (Fourier or identity) and the associated fusion layer.
-
-     If `sigmas` is `None`, no Fourier encoding is applied and the inputs
-     are passed straight to the network.
-     Otherwise, several Fourier encodings are created (one per sigma) and
-     their outputs are merged through a linear layer.
-
-     Parameters
-     ----------
-     input_size : int
-         Input dimension.
-     output_size : int
-         Output dimension of the network.
-     sigmas : list[float] or None
-         List of sigma parameters for the Fourier encodings.
-     fourier_input_size : int
-         Expected dimension after encoding (not used directly here, but
-         kept for consistency with the overall architecture).
-     nb_fourier : int
-         Number of Fourier components per encoding.
-
-     Returns
-     -------
-     encodings : torch.nn.ModuleList (scripted)
-         List of encoding modules (Fourier or identity).
-     f : torch.nn.Module (scripted)
-         Fusion module for the encodings (identity or linear layer).
-     """
-     if sigmas is None:
-         encodings = nn.ModuleList (
-             [nn.Identity ()]
-         )
-         f = nn.Identity ()
-     else:
-         encodings = nn.ModuleList (
-             [FourierEncoding (nb_fourier, input_size, sigma) for sigma in sigmas]
-         )
-         f = nn.Linear (len (encodings) * output_size, output_size)
-     return torch.jit.script (encodings), torch.jit.script (f)
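To make the docstring concrete, here is a standalone sketch (with hypothetical parameter values) of the random Fourier feature mapping gamma(x) = [cos(2*pi*x@B.T), sin(2*pi*x@B.T)] that FourierEncoding implements; larger sigma values bias the downstream MLP toward higher frequencies.

import math
import torch
import torch.nn as nn

class RFF(nn.Module):
    """Minimal restatement of FourierEncoding for illustration."""
    def __init__(self, nb_fourier, input_size, sigma):
        super().__init__()
        # Projection matrix drawn from N(0, sigma^2); a trainable
        # Parameter here, exactly as in the diffed code.
        self.B = nn.Parameter(torch.randn(nb_fourier, input_size) * sigma)

    def forward(self, x):
        vp = 2 * math.pi * x @ self.B.T
        return torch.cat((torch.cos(vp), torch.sin(vp)), dim=-1)

enc = RFF(nb_fourier=64, input_size=2, sigma=10.0)
x = torch.rand(8, 2)           # e.g. 2-D coordinates in [0, 1]
print(enc(x).shape)            # torch.Size([8, 128]) == (N, 2 * nb_fourier)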
NeuralNetworks/MLP/Layers.py
@@ -1,31 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- from ..Dependances import torch, nn
-
- def create_layers(
-     input_size,
-     output_size,
-     hidden_layers,
-     sigmas,
-     fourier_input_size,
-     nb_fourier,
-     norm):
-
-     layer_list = [
-         nn.Linear (input_size if sigmas is None else 2 * nb_fourier, hidden_layers [0]),
-         norm
-     ]
-
-     for k in range (len (hidden_layers) - 1):
-         layer_list.extend ([
-             nn.Linear (hidden_layers [k], hidden_layers [k + 1]),
-             norm
-         ])
-     layer_list.append (nn.Linear (hidden_layers [-1], output_size))
-
-     return torch.jit.script (nn.Sequential (*layer_list))
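Spelled out by hand with hypothetical arguments, create_layers with hidden_layers=[32, 32], sigmas=[10.0], nb_fourier=64, output_size=1 and norm=nn.GELU() assembles the stack below. The single activation instance is reused between every pair of Linear layers, which is harmless since these activations hold no state.

import torch.nn as nn

# Hand-expanded equivalent of the Sequential built by create_layers:
act = nn.GELU()
model = nn.Sequential(
    nn.Linear(2 * 64, 32),   # input is the 2 * nb_fourier encoded features
    act,
    nn.Linear(32, 32),
    act,
    nn.Linear(32, 1),
)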
NeuralNetworks/MLP/inference.py
@@ -1,26 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- from ..Dependances import torch, np, device
-
- def infer (net, x):
-     with torch.no_grad ():
-         x = x.unsqueeze (0) if x.dim () == 1 else x
-
-         net = net.to (device)
-         x = x.to (device)
-         results_list = [net.model (encoding (x)) for encoding in net.encodings]
-         x = x.to ('cpu')
-
-         output = np.array (
-             net.f (
-                 torch.cat (results_list, dim = 1)
-             ).cpu ().numpy ().flatten ()
-         )
-         net = net.to ('cpu')
-
-         return output
NeuralNetworks/Trainer/dynamic_learning_rate.py
@@ -1,79 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- from ..Dependances import np
-
- def generate_learning_rate (
-     Nb_iter,
-     X0,
-     mode = "smoother",
-     first = 0.4,
-     second = 1,
-     Xi = 5e-4,
-     Xf = 1e-6):
-
-     infl = int (first * Nb_iter)
-     Plat = int (second * Nb_iter)
-
-     def smoothstep (x0, xa, n, m):
-         values = []
-         if m == "smooth":
-             for i in range (n):
-                 t = i / (n - 1)  # t in [0, 1]
-                 s = t * t * (3 - 2 * t)  # smoothstep
-                 x = x0 + (xa - x0) * s
-                 values.append (x)
-         elif m == "smoother":
-             for i in range(n):
-                 t = i / (n - 1)  # t in [0, 1]
-                 s = t * t * t * (t * (6 * t - 15) + 10)  # smootherstep
-                 x = x0 + (xa - x0) * s
-                 values.append(x)
-         else:
-             raise ValueError("mode must be 'smooth' or 'smoother'")
-         return values
-
-     cuv1 = smoothstep (X0, Xi, infl, mode)
-     cuv2 = smoothstep (Xi, Xf, Plat - infl, mode)
-     cuv3 = [Xf for _ in range (Plat, Nb_iter)]
-
-     return np.array (cuv1 + cuv2 + cuv3)
-
- def update_lr (losses, lrs, epoch, lr):
-
-     loss = losses[-1] + (losses[-1] - losses[0])/len(losses)
-
-     n = 9
-     # Control points (final multiplicity = zero derivative)
-     P = np.array([
-         0.0,
-         0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
-         1.0, 1.0
-     ])
-
-     # Binomial coefficients (precomputed ONCE)
-     C = np.array([1, 9, 36, 84, 126, 126, 84, 36, 9, 1], dtype=float)
-
-     x = np.clip(loss, 0.0, 1.0)
-     t = np.sqrt(x)
-
-     u = 1.0 - t
-
-     # Vectorised Bernstein evaluation
-     y = (
-         C[0] * u**9 * P[0] +
-         C[1] * u**8 * t * P[1] +
-         C[2] * u**7 * t**2 * P[2] +
-         C[3] * u**6 * t**3 * P[3] +
-         C[4] * u**5 * t**4 * P[4] +
-         C[5] * u**4 * t**5 * P[5] +
-         C[6] * u**3 * t**6 * P[6] +
-         C[7] * u**2 * t**7 * P[7] +
-         C[8] * u * t**8 * P[8] +
-         C[9] * t**9 * P[9]
-     )
-     return np.clip (max(0.001 * y, lrs [epoch]), 0.0, lr)
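The schedule shape is easier to see in isolation: a "smootherstep" ramp from X0 down to Xi over the first `first * Nb_iter` epochs, a second ramp from Xi down to Xf, and a flat tail at Xf once `second * Nb_iter` epochs are reached. update_lr then blends in a loss-driven term: it evaluates a degree-9 Bernstein (Bezier) polynomial at t = sqrt(loss) and returns max(0.001 * y, lrs[epoch]) clipped to the previous learning rate, so the precomputed curve acts as a floor. A minimal sketch of the schedule with hypothetical values:

import numpy as np

def smootherstep(x0, xa, n):
    t = np.linspace(0.0, 1.0, n)
    s = t**3 * (t * (6 * t - 15) + 10)   # 6t^5 - 15t^4 + 10t^3
    return x0 + (xa - x0) * s

# 100 epochs, X0=1e-3, first=0.4, second=0.9, Xi=5e-4, Xf=1e-6
lrs = np.concatenate([
    smootherstep(1e-3, 5e-4, 40),    # X0 -> Xi over the first 40 epochs
    smootherstep(5e-4, 1e-6, 50),    # Xi -> Xf up to epoch 90
    np.full(10, 1e-6),               # flat tail at Xf
])
print(lrs[0], lrs[39], lrs[-1])      # 1e-3, 5e-4, 1e-6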
NeuralNetworks/Trainer/sample_data.py
@@ -1,19 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- from ..Dependances import train_test_split
-
- def sample_data (inputs, outputs, test_size):
-     if test_size is None:
-         return inputs, inputs, outputs, outputs
-     else:
-         return train_test_split (
-             inputs,
-             outputs,
-             test_size = test_size,
-             random_state = 42
-         )
NeuralNetworks/Trainer/train.py
@@ -1,75 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- from ..Dependances import torch, GradScaler, device, tqdm, autocast
-
- from .dynamic_learning_rate import generate_learning_rate, update_lr
-
- def train_f (Trainer, num_epochs = 1, activate_tqdm = True):
-     dev = str (device)
-     scaler = GradScaler (dev)
-
-     lrs = generate_learning_rate (num_epochs, Trainer.init_lr)
-
-     Trainer.X_train = Trainer.X_train.to (device)
-     Trainer.y_train = Trainer.y_train.to (device)
-     n_samples = Trainer.X_train.size (0)
-
-     torch.cuda.empty_cache ()
-     for k, net in enumerate (Trainer.nets):
-         net = net.to (device)
-         net.learnings.append(Trainer.init_lr)
-
-         pbar = tqdm (
-             range (num_epochs),
-             desc = f"train epoch",
-             disable = not (activate_tqdm)
-         )
-
-         for epoch in pbar:
-             # Generate a random ordering of the indices
-             perm = torch.randperm (n_samples, device = device)
-             epoch_loss = 0.0
-
-             # --- Iterate over the mini-batches ---
-             for i in range (0, n_samples, Trainer.batch_size):
-                 idx = perm [i : i + Trainer.batch_size]
-
-                 # Inner function computing the loss and its gradients
-                 def closure ():
-                     Trainer.optims [k].zero_grad (set_to_none = True)
-                     with autocast (dev):
-                         loss = Trainer.crit (
-                             net.f (
-                                 torch.cat (
-                                     [net.model (encoding (Trainer.X_train [idx])) for encoding in net.encodings],
-                                     dim = 1
-                                 )
-                             ),
-                             Trainer.y_train[idx]
-                         )
-                     scaler.scale (loss).backward ()
-                     return loss
-
-                 epoch_loss += closure()
-                 scaler.step (Trainer.optims [k])
-                 scaler.update ()
-
-             # --- Store the epoch loss ---
-             #Trainer.frequencies.append(net.encodings[0].B.detach().cpu().clone())
-             net.losses.append (epoch_loss.item ())
-             net.learnings.append (update_lr (net.losses [-20:], lrs, epoch, net.learnings[-1]))
-             for param_group in Trainer.optims [k].param_groups:
-                 param_group ['lr'] = net.learnings[-1]
-
-             pbar.set_postfix(loss=f"{epoch_loss:.5f}", lr=f"{net.learnings[-1]:.5f}")
-
-         net = net.to ('cpu')
-         net.learnings.pop(-1)
-     Trainer.X_train = Trainer.X_train.to ('cpu')
-     Trainer.y_train = Trainer.y_train.to ('cpu')
-     torch.cuda.empty_cache ()
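The per-batch pattern inside train_f is the standard torch.amp recipe: autocast forward pass, scaled backward, scaler.step/scaler.update, then a manual learning-rate write into the optimizer's param_groups. A stripped-down, self-contained sketch with a hypothetical model and data:

import torch
from torch.amp import autocast, GradScaler

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(4, 1).to(device)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
crit = torch.nn.MSELoss()
# Loss scaling only matters for fp16 on CUDA; disable it elsewhere
scaler = GradScaler(device.type, enabled=(device.type == "cuda"))

x = torch.randn(32, 4, device=device)
y = torch.randn(32, 1, device=device)

opt.zero_grad(set_to_none=True)
with autocast(device.type):          # mixed-precision forward pass
    loss = crit(model(x), y)
scaler.scale(loss).backward()        # scaled backward avoids fp16 underflow
scaler.step(opt)
scaler.update()
for g in opt.param_groups:           # train_f updates the lr the same way
    g["lr"] = 5e-4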
NeuralNetworks/UI/Learnings.py
@@ -1,45 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- from ..Dependances import plt, np
-
- def learnings(*nets):
-
-     # --- Initialise the figure ---
-     fig, ax1 = plt.subplots()
-     fig.set_figheight(5)
-     fig.set_figwidth(5)
-
-     # --- Set the axis limits ---
-     all_learnings = [[lr for lr in net.learnings] for net in nets]
-     if max(len(lst) for lst in all_learnings) == 1:
-         lenlearnings = 2
-     else:
-         lenlearnings = max(len(lst) for lst in all_learnings)
-     plt.xlim(1, lenlearnings)
-
-     # --- Plot the learning-rate curve of each network ---
-     for k, net in enumerate(nets):
-         ax1.plot(
-             np.arange(1, len(all_learnings[k]) + 1),
-             all_learnings[k],
-             label=net.name
-         )
-     ax1.set_xlabel("Epochs")
-     ax1.set_ylabel("Learning rate")
-     ax1.legend(loc="upper left")
-     ax1.grid(True)
-
-     plt.yscale('log', nonpositive='mask')
-     # --- Display ---
-     plt.legend()
-     plt.xlabel("Epoch")
-     plt.ylabel("Learning rate")
-     fig.canvas.draw_idle()
-     plt.tight_layout()
-     plt.ion()  # interactive mode
-     plt.show()
NeuralNetworks/UI/Losses.py
@@ -1,45 +0,0 @@
- # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
-
- from ..Dependances import plt, np
-
- def losses(*nets):
-
-     # --- Initialise the figure ---
-     fig, ax1 = plt.subplots()
-     fig.set_figheight(5)
-     fig.set_figwidth(5)
-
-     # --- Set the axis limits ---
-     all_losses = [[loss for loss in net.losses] for net in nets]
-     if max(len(lst) for lst in all_losses) == 1:
-         lenlosses = 2
-     else:
-         lenlosses = max(len(lst) for lst in all_losses)
-     plt.xlim(1, lenlosses)
-
-     # --- Plot the loss curve of each network ---
-     for k, net in enumerate(nets):
-         ax1.plot(
-             np.arange(1, len(all_losses[k]) + 1),
-             all_losses[k],
-             label=net.name
-         )
-     ax1.set_xlabel("Epochs")
-     ax1.set_ylabel("Loss")
-     ax1.legend(loc="upper left")
-     ax1.grid(True)
-
-     plt.yscale('log', nonpositive='mask')
-     # --- Display ---
-     plt.legend()
-     plt.xlabel("Epoch")
-     plt.ylabel("Residuals")
-     fig.canvas.draw_idle()
-     plt.tight_layout()
-     plt.ion()  # interactive mode
-     plt.show()
neuralnetworks-0.2.5.dist-info/RECORD
@@ -1,20 +0,0 @@
- NeuralNetworks/__init__.py,sha256=DSCQD_dezpOEjqAGUSQsocNYmNq8STPHZaLjIXXLCF8,668
- NeuralNetworks/Dependances/__init__.py,sha256=qEpDbSD8cCq-E5XVisNUVf3kZOYopDnQWToyRefPgKE,1227
- NeuralNetworks/Dependances/matplot.py,sha256=elS8u6DZHYP-8mHEpYNOw3jDzhCAWTld9tm3OAD46zw,957
- NeuralNetworks/Dependances/pytorch.py,sha256=RQlSV3-8uHAoEgK0FBae7O4Mdug7h_MY--sN1fK59qw,3329
- NeuralNetworks/MLP/FourierFeatures.py,sha256=klgRM1HK09oA2NRMDxQMjJJ-WoUd5hV1ip5hHe9rHjI,3250
- NeuralNetworks/MLP/Layers.py,sha256=WAksXsiMxaClyYTxPhlyQbwwj9qTtXs3EWCO1RqjUHY,945
- NeuralNetworks/MLP/__init__.py,sha256=v7h4Vcmay0_A83spe67HhT7TQ7cSKQX4eVVBJhzEWfk,3052
- NeuralNetworks/MLP/inference.py,sha256=9aL7pUx1LTVvrc6UYHX049UjODTgHY6cweFcp2gequQ,853
- NeuralNetworks/Trainer/__init__.py,sha256=v0qKqx9XkYWkuouNNy0jTHQ_cZqYhFj98qrwSXlDXy0,1711
- NeuralNetworks/Trainer/dynamic_learning_rate.py,sha256=1JAD-k0cjdL_71zGeeCUFOa61H4PzFITDjZ2nK0TzXU,2340
- NeuralNetworks/Trainer/sample_data.py,sha256=7waC9colb7DXU4yKMcgcCnPG3Guv-isipcgVHJPPCNE,673
- NeuralNetworks/Trainer/train.py,sha256=NAbHFKg4hl96OXq_i63lcRYwrPHiuKu7ihexakhpgDY,3182
- NeuralNetworks/UI/Learnings.py,sha256=4TBR5pcjyoBeL7eikNKM6xn25jnqL-mWT7hbrt9q-Gw,1418
- NeuralNetworks/UI/Losses.py,sha256=Tu5xuDiutR9a4xcZKpyWN_tzSDu3_fImEf8FbAEehio,1378
- NeuralNetworks/UI/__init__.py,sha256=L96xwQZJ-HoqqOGxaheosiDKHR3mRopuXkif--rO1J4,409
- neuralnetworks-0.2.5.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- neuralnetworks-0.2.5.dist-info/METADATA,sha256=FjTIFWlGmzIjQmWhFLnTuUI7MfNl0U5jb7EjRQJ7lh8,18349
- neuralnetworks-0.2.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- neuralnetworks-0.2.5.dist-info/top_level.txt,sha256=h18nmC1BX7avyAAwKh0OQWezxgXmOpmVtbFq-8Mcbms,15
- neuralnetworks-0.2.5.dist-info/RECORD,,