NeuralNetworks 0.2.6-py3-none-any.whl → 0.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- NeuralNetworks/MLP/MLP.py +64 -0
- NeuralNetworks/MLP/_MLP_tools.py +74 -0
- NeuralNetworks/MLP/__init__.py +2 -80
- NeuralNetworks/Trainer/Trainer.py +98 -0
- NeuralNetworks/Trainer/_Trainer_tools.py +155 -0
- NeuralNetworks/Trainer/__init__.py +2 -52
- NeuralNetworks/VAE/VAE.py +114 -0
- NeuralNetworks/VAE/_VAE_tools.py +6 -0
- NeuralNetworks/VAE/__init__.py +8 -0
- NeuralNetworks/{Dependances/matplot.py → _Dependances/__init__.py} +12 -12
- NeuralNetworks/_Dependances/pytorch.py +125 -0
- NeuralNetworks/{MLP/inference.py → _Dependances/tools.py} +8 -5
- NeuralNetworks/_UI/Learnings.py +31 -0
- NeuralNetworks/_UI/Losses.py +48 -0
- NeuralNetworks/{UI → _UI}/__init__.py +1 -1
- NeuralNetworks/_UI/_plot.py +50 -0
- NeuralNetworks/__init__.py +6 -7
- NeuralNetworks/{shared → _shared}/__init__.py +1 -1
- NeuralNetworks/_shared/module.py +115 -0
- {neuralnetworks-0.2.6.dist-info → neuralnetworks-0.2.8.dist-info}/METADATA +8 -15
- neuralnetworks-0.2.8.dist-info/RECORD +24 -0
- {neuralnetworks-0.2.6.dist-info → neuralnetworks-0.2.8.dist-info}/WHEEL +1 -1
- NeuralNetworks/Dependances/__init__.py +0 -75
- NeuralNetworks/Dependances/pytorch.py +0 -113
- NeuralNetworks/MLP/FourierFeatures.py +0 -89
- NeuralNetworks/MLP/Layers.py +0 -31
- NeuralNetworks/Trainer/dynamic_learning_rate.py +0 -79
- NeuralNetworks/Trainer/sample_data.py +0 -19
- NeuralNetworks/Trainer/train.py +0 -70
- NeuralNetworks/UI/Learnings.py +0 -45
- NeuralNetworks/UI/Losses.py +0 -45
- NeuralNetworks/shared/module.py +0 -41
- neuralnetworks-0.2.6.dist-info/RECORD +0 -22
- {neuralnetworks-0.2.6.dist-info → neuralnetworks-0.2.8.dist-info}/licenses/LICENSE +0 -0
- {neuralnetworks-0.2.6.dist-info → neuralnetworks-0.2.8.dist-info}/top_level.txt +0 -0
NeuralNetworks/Dependances/pytorch.py
DELETED

@@ -1,113 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-import os
-
-import platform
-
-import torch
-import torch.optim as optim
-import torch.nn as nn
-import torch.quantization as tq
-from torch.amp import autocast, GradScaler
-from torch.utils.data import TensorDataset, DataLoader
-
-from torchmetrics.image import PeakSignalNoiseRatio as PSNR
-from torchvision.transforms import ToTensor, Resize, Compose
-
-import visualtorch
-
-torch.cuda.empty_cache()
-def get_best_device():
-
-    os_name = platform.system().lower()
-
-    # =========== APPLE SILICON (macOS) ===========
-    if os_name == "darwin":
-        if torch.backends.mps.is_available():
-            return torch.device("mps")
-
-    # =========== WINDOWS ===========
-    if os_name == "windows":
-        # 1) CUDA
-        if torch.cuda.is_available():
-            return torch.device("cuda")
-
-    # =========== LINUX ===========
-    if os_name == "linux":
-        # 1) CUDA (Nvidia)
-        if torch.cuda.is_available():
-            return torch.device("cuda")
-        # 2) ROCm (AMD)
-        elif hasattr(torch.backends, "hip") and torch.backends.hip.is_available():
-            return torch.device("cuda")
-
-        # 3) Intel oneAPI / XPU
-        elif hasattr(torch, "xpu") and torch.xpu.is_available():
-            return torch.device("xpu")
-
-    # =========== Unknown OS ===========
-    return torch.device("cpu")
-
-device = get_best_device()
-
-# --- CUDA optimisations ---
-# Speed up convolutions and matmul
-torch.backends.cudnn.benchmark = True
-torch.backends.cudnn.enabled = True
-torch.backends.cuda.matmul.allow_tf32 = True
-torch.backends.cudnn.allow_tf32 = True
-
-# Autograd settings
-torch.autograd.set_detect_anomaly(False)
-torch.autograd.profiler.profile(enabled=False)
-torch.use_deterministic_algorithms(False)
-
-torch._inductor.config.max_autotune = "max"
-
-norm_list = {
-    "Relu": nn.ReLU(),
-    "LeakyRelu": nn.LeakyReLU(),
-    "ELU": nn.ELU(),
-    "SELU": nn.SELU(),
-    "GELU": nn.GELU(),
-    "Mish": nn.Mish(),
-    "Sigmoid": nn.Sigmoid(),
-    "Tanh": nn.Tanh(),
-    "Hardtanh": nn.Hardtanh(),
-    "Softplus": nn.Softplus(),
-    "Softsign": nn.Softsign()
-}
-
-crit_list = {
-    "MSE": nn.MSELoss(),
-    "L1": nn.L1Loss(),
-    "SmoothL1": nn.SmoothL1Loss(),
-    "Huber": nn.HuberLoss(),
-    "CrossEntropy": nn.CrossEntropyLoss(),
-    "KLDiv": nn.KLDivLoss(),
-    "PoissonNLL": nn.PoissonNLLLoss(),
-    "MultiLabelSoftMargin": nn.MultiLabelSoftMarginLoss()
-}
-
-def optim_list(params):
-    return {
-        "Adadelta": optim.Adadelta(params),
-        "Adafactor": optim.Adafactor(params),
-        "Adam": optim.Adam(params),
-        "AdamW": optim.AdamW(params),
-        "Adamax": optim.Adamax(params),
-        "ASGD": optim.ASGD(params),
-        "NAdam": optim.NAdam(params),
-        "RAdam": optim.RAdam(params),
-        "RMSprop": optim.RMSprop(params),
-        "Rprop": optim.Rprop(params),
-        "SGD": optim.SGD(params)
-    }
-
-def tensorise(obj):
-    return torch.as_tensor(obj, dtype=torch.float32, device='cpu')
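The deleted Dependances/pytorch.py doubled as the device selector and as a registry of activations, criteria, and optimizers looked up by string key. A condensed, self-contained sketch of that registry pattern follows; the tables are small subsets of the real ones above, and the model and data are illustrative, not from the package:

import torch
import torch.nn as nn
import torch.optim as optim

# Illustrative subsets of the tables defined above.
norm_list = {"Relu": nn.ReLU(), "Tanh": nn.Tanh()}
crit_list = {"MSE": nn.MSELoss(), "L1": nn.L1Loss()}

def optim_list(params):
    # Mirrors the deleted helper: every optimizer is built eagerly
    # for the same parameter list, then one is picked by key.
    params = list(params)
    return {"Adam": optim.Adam(params), "SGD": optim.SGD(params, lr=1e-3)}

model = nn.Sequential(nn.Linear(2, 16), norm_list["Tanh"], nn.Linear(16, 1))
crit = crit_list["MSE"]
opt = optim_list(model.parameters())["Adam"]

x, y = torch.randn(8, 2), torch.randn(8, 1)
opt.zero_grad()
loss = crit(model(x), y)
loss.backward()
opt.step()
print(float(loss))

Note that the real optim_list instantiates all eleven optimizers eagerly for the same parameter list, so selecting one key still constructs (and allocates state for) the other ten.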
NeuralNetworks/MLP/FourierFeatures.py
DELETED

@@ -1,89 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-from ..Dependances import torch, nn, np
-
-class FourierEncoding (nn.Module):
-    """
-    Random Fourier encoding that enriches the input representation.
-
-    This layer projects the inputs into a frequency space through a
-    learned projection matrix, then applies sine and cosine functions
-    to capture high-frequency variations.
-
-    Parameters
-    ----------
-    nb_fourier : int
-        Number of Fourier components.
-    input_size : int
-        Dimension of the inputs.
-    sigma : float
-        Standard deviation used to initialise the projection matrix.
-    """
-    def __init__ (self, nb_fourier, input_size, sigma):
-        super ().__init__ ()
-        self.B = nn.Parameter (torch.randn (nb_fourier, input_size) * sigma)
-
-    def forward (self, x):
-        """
-        Applies the Fourier encoding to the inputs.
-
-        Parameters
-        ----------
-        x : torch.Tensor
-            Input tensor of shape `(N, input_size)`.
-
-        Returns
-        -------
-        torch.Tensor
-            Encoded tensor of shape `(N, 2 * nb_fourier)`, the
-            concatenation of the cosines and sines.
-        """
-        vp = 2 * np.pi * x @ self.B.T
-        return torch.cat ((torch.cos (vp), torch.sin (vp)), dim = -1)
-
-def encode (input_size, output_size, sigmas, fourier_input_size, nb_fourier):
-    """
-    Builds the encoding modules (Fourier or identity) and the associated fusion layer.
-
-    If `sigmas` is `None`, no Fourier encoding is applied and the inputs
-    are passed straight to the network.
-    Otherwise, several Fourier encodings are created (one per sigma), and
-    their outputs are merged through a linear layer.
-
-    Parameters
-    ----------
-    input_size : int
-        Dimension of the inputs.
-    output_size : int
-        Output dimension of the network.
-    sigmas : list[float] or None
-        List of sigma parameters for the Fourier encodings.
-    fourier_input_size : int
-        Expected dimension after encoding (not used directly here,
-        but kept for consistency with the overall architecture).
-    nb_fourier : int
-        Number of Fourier components per encoding.
-
-    Returns
-    -------
-    encodings : torch.nn.ModuleList (scripted)
-        List of encoding modules (Fourier or identity).
-    f : torch.nn.Module (scripted)
-        Fusion module for the encodings (identity or linear layer).
-    """
-    if sigmas is None:
-        encodings = nn.ModuleList (
-            [nn.Identity ()]
-        )
-        f = nn.Identity ()
-    else:
-        encodings = nn.ModuleList (
-            [FourierEncoding (nb_fourier, input_size, sigma) for sigma in sigmas]
-        )
-        f = nn.Linear (len (encodings) * output_size, output_size)
-    return torch.jit.script (encodings), torch.jit.script (f)
NeuralNetworks/MLP/Layers.py
DELETED

@@ -1,31 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-from ..Dependances import torch, nn
-
-def create_layers(
-    input_size,
-    output_size,
-    hidden_layers,
-    sigmas,
-    fourier_input_size,
-    nb_fourier,
-    norm):
-
-    layer_list = [
-        nn.Linear (input_size if sigmas is None else 2 * nb_fourier, hidden_layers [0]),
-        norm
-    ]
-
-    for k in range (len (hidden_layers) - 1):
-        layer_list.extend ([
-            nn.Linear (hidden_layers [k], hidden_layers [k + 1]),
-            norm
-        ])
-    layer_list.append (nn.Linear (hidden_layers [-1], output_size))
-
-    return torch.jit.script (nn.Sequential (*layer_list))
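The builder above alternates Linear layers with the chosen activation, switching the first layer's input width to 2 * nb_fourier when Fourier encoding is active. A runnable mirror with the TorchScript wrapping dropped; the argument values are illustrative:

import torch
import torch.nn as nn

def create_layers(input_size, output_size, hidden_layers,
                  sigmas, fourier_input_size, nb_fourier, norm):
    # First Linear consumes input_size without Fourier encoding,
    # or 2 * nb_fourier (the cos/sin halves) with it.
    layer_list = [nn.Linear(input_size if sigmas is None else 2 * nb_fourier,
                            hidden_layers[0]), norm]
    for k in range(len(hidden_layers) - 1):
        layer_list.extend([nn.Linear(hidden_layers[k], hidden_layers[k + 1]), norm])
    layer_list.append(nn.Linear(hidden_layers[-1], output_size))
    return nn.Sequential(*layer_list)

net = create_layers(2, 1, [32, 32], sigmas=None,
                    fourier_input_size=None, nb_fourier=64, norm=nn.Tanh())
print(net)  # Linear(2->32), Tanh, Linear(32->32), Tanh, Linear(32->1)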
NeuralNetworks/Trainer/dynamic_learning_rate.py
DELETED

@@ -1,79 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-from ..Dependances import np
-
-def generate_learning_rate (
-    Nb_iter,
-    X0,
-    mode = "smoother",
-    first = 0.4,
-    second = 1,
-    Xi = 5e-4,
-    Xf = 1e-6):
-
-    infl = int (first * Nb_iter)
-    Plat = int (second * Nb_iter)
-
-    def smoothstep (x0, xa, n, m):
-        values = []
-        if m == "smooth":
-            for i in range (n):
-                t = i / (n - 1)  # t in [0, 1]
-                s = t * t * (3 - 2 * t)  # smoothstep
-                x = x0 + (xa - x0) * s
-                values.append (x)
-        elif m == "smoother":
-            for i in range(n):
-                t = i / (n - 1)  # t in [0, 1]
-                s = t * t * t * (t * (6 * t - 15) + 10)
-                x = x0 + (xa - x0) * s
-                values.append(x)
-        else:
-            raise ValueError("mode must be 'smooth' or 'smoother'")
-        return values
-
-    cuv1 = smoothstep (X0, Xi, infl, mode)
-    cuv2 = smoothstep (Xi, Xf, Plat - infl, mode)
-    cuv3 = [Xf for _ in range (Plat, Nb_iter)]
-
-    return np.array (cuv1 + cuv2 + cuv3)
-
-def update_lr (losses, lrs, epoch, lr):
-
-    loss = losses[-1] + (losses[-1] - losses[0])/len(losses)
-
-    n = 9
-    # Control points (final multiplicity = zero derivative)
-    P = np.array([
-        0.0,
-        0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
-        1.0, 1.0
-    ])
-
-    # Binomial coefficients (precomputed ONCE)
-    C = np.array([1, 9, 36, 84, 126, 126, 84, 36, 9, 1], dtype=float)
-
-    x = np.clip(loss, 0.0, 1.0)
-    t = np.sqrt(x)
-
-    u = 1.0 - t
-
-    # Vectorised Bernstein evaluation
-    y = (
-        C[0] * u**9 * P[0] +
-        C[1] * u**8 * t * P[1] +
-        C[2] * u**7 * t**2 * P[2] +
-        C[3] * u**6 * t**3 * P[3] +
-        C[4] * u**5 * t**4 * P[4] +
-        C[5] * u**4 * t**5 * P[5] +
-        C[6] * u**3 * t**6 * P[6] +
-        C[7] * u**2 * t**7 * P[7] +
-        C[8] * u * t**8 * P[8] +
-        C[9] * t**9 * P[9]
-    )
-    return np.clip (max(0.001 * y, lrs [epoch]), 0.0, lr)
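The schedule above is two smoothstep ramps (X0 to Xi over the first `first` fraction of training, then Xi to Xf) followed by a flat tail, while update_lr re-floors that schedule with a degree-9 Bernstein/Bézier curve evaluated at t = sqrt(loss). A small sketch of just the "smoother" (quintic) ramp using the defaults above; X0 and the epoch count are illustrative:

import numpy as np

def smootherstep(x0, xa, n):
    # Quintic ease s(t) = 6t^5 - 15t^4 + 10t^3: zero first and
    # second derivatives at both ends of the ramp.
    t = np.linspace(0.0, 1.0, n)
    s = t * t * t * (t * (6 * t - 15) + 10)
    return x0 + (xa - x0) * s

# Defaults from above: first=0.4, second=1, Xi=5e-4, Xf=1e-6.
num_epochs, X0, Xi, Xf = 100, 1e-3, 5e-4, 1e-6
infl = int(0.4 * num_epochs)
lrs = np.concatenate([
    smootherstep(X0, Xi, infl),               # ramp X0 -> Xi
    smootherstep(Xi, Xf, num_epochs - infl),  # ramp Xi -> Xf (plateau empty here)
])
print(lrs[0], lrs[infl - 1], lrs[-1])  # 1e-3 ... ~5e-4 ... 1e-6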
NeuralNetworks/Trainer/sample_data.py
DELETED

@@ -1,19 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-from ..Dependances import train_test_split
-
-def sample_data (inputs, outputs, test_size):
-    if test_size is None:
-        return inputs, inputs, outputs, outputs
-    else:
-        return train_test_split (
-            inputs,
-            outputs,
-            test_size = test_size,
-            random_state = 42
-        )
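A runnable mirror of the helper above: with test_size=None the full set doubles as both splits, otherwise scikit-learn's train_test_split is used with a fixed seed. The toy arrays are illustrative:

import numpy as np
from sklearn.model_selection import train_test_split

def sample_data(inputs, outputs, test_size):
    # test_size=None: the same arrays serve as train and test split.
    if test_size is None:
        return inputs, inputs, outputs, outputs
    return train_test_split(inputs, outputs, test_size=test_size, random_state=42)

X, y = np.arange(20).reshape(10, 2), np.arange(10)
X_train, X_test, y_train, y_test = sample_data(X, y, test_size=0.2)
print(X_train.shape, X_test.shape)  # (8, 2) (2, 2)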
NeuralNetworks/Trainer/train.py
DELETED

@@ -1,70 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-from ..Dependances import torch, GradScaler, device, tqdm, autocast
-
-from .dynamic_learning_rate import generate_learning_rate, update_lr
-
-def train_f (Trainer, num_epochs = 1, activate_tqdm = True):
-    dev = str (device)
-    scaler = GradScaler (dev)
-
-    lrs = generate_learning_rate (num_epochs, Trainer.init_lr)
-
-    Trainer.X_train = Trainer.X_train.to (device)
-    Trainer.y_train = Trainer.y_train.to (device)
-    n_samples = Trainer.X_train.size (0)
-
-    torch.cuda.empty_cache ()
-    for k, net in enumerate (Trainer.nets):
-        net = net.to (device)
-        net.learnings.append(Trainer.init_lr)
-
-        pbar = tqdm (
-            range (num_epochs),
-            desc = f"train epoch",
-            disable = not (activate_tqdm),
-            mininterval=0.5
-        )
-
-        for epoch in pbar:
-            # Generate a random ordering of the indices
-            perm = torch.randperm (n_samples, device = device)
-            epoch_loss = 0.0
-
-            # --- Iterate over the mini-batches ---
-            for i in range (0, n_samples, Trainer.batch_size):
-                idx = perm [i : i + Trainer.batch_size]
-
-                # Inner function computing the loss and the gradients
-                def closure ():
-                    Trainer.optims [k].zero_grad (set_to_none = True)
-                    with autocast (dev):
-                        loss = Trainer.crit (net.train_forward (Trainer.X_train [idx]),
-                                             Trainer.y_train [idx]
-                        )
-                    scaler.scale (loss).backward ()
-                    return loss
-
-                epoch_loss += closure()
-                scaler.step (Trainer.optims [k])
-                scaler.update ()
-
-            # --- Store the loss for this epoch ---
-            #Trainer.frequencies.append(net.encodings[0].B.detach().cpu().clone())
-            net.losses.append (epoch_loss.item ())
-            net.learnings.append (update_lr (net.losses [-20:], lrs, epoch, net.learnings[-1]))
-            for param_group in Trainer.optims [k].param_groups:
-                param_group ['lr'] = net.learnings[-1]
-
-            #pbar.set_postfix(loss=f"{epoch_loss:.5f}",lr=f"{net.learnings[-1]:.5f}")
-
-        net = net.to ('cpu')
-        net.learnings.pop(-1)
-        Trainer.X_train = Trainer.X_train.to ('cpu')
-        Trainer.y_train = Trainer.y_train.to ('cpu')
-        torch.cuda.empty_cache ()
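Stripped of the Trainer plumbing, train_f is a standard mixed-precision loop: shuffle indices, autocast forward, scaled backward, scaler.step/update, then rewrite the optimizer's lr from update_lr once per epoch. A self-contained single-network sketch of that pattern, assuming a torch version exposing torch.amp.GradScaler(device) as the deleted Dependances module does; model, data, and batch size are illustrative, and the per-epoch lr rewrite is reduced to a comment:

import torch
import torch.nn as nn
from torch.amp import GradScaler, autocast

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Sequential(nn.Linear(2, 32), nn.Tanh(), nn.Linear(32, 1)).to(device)
opt = torch.optim.Adam(model.parameters(), lr=5e-4)
crit = nn.MSELoss()
scaler = GradScaler(device)

X = torch.rand(256, 2, device=device)
y = torch.rand(256, 1, device=device)

for epoch in range(10):
    perm = torch.randperm(X.size(0), device=device)   # shuffled indices
    epoch_loss = 0.0
    for i in range(0, X.size(0), 64):                 # mini-batches of 64
        idx = perm[i : i + 64]
        opt.zero_grad(set_to_none=True)
        with autocast(device):                        # mixed-precision forward
            loss = crit(model(X[idx]), y[idx])
        scaler.scale(loss).backward()                 # scaled backward pass
        scaler.step(opt)
        scaler.update()
        epoch_loss += loss.item()
    # train_f would now append epoch_loss to net.losses and rewrite each
    # param_group's lr from update_lr(...) before the next epoch.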
NeuralNetworks/UI/Learnings.py
DELETED

@@ -1,45 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-from ..Dependances import plt, np
-
-def learnings(*nets):
-
-    # --- Set up the figure ---
-    fig, ax1 = plt.subplots()
-    fig.set_figheight(5)
-    fig.set_figwidth(5)
-
-    # --- Set the axis limits ---
-    all_learnings = [[lr for lr in net.learnings] for net in nets]
-    if max(len(lst) for lst in all_learnings) == 1:
-        lenlearnings = 2
-    else:
-        lenlearnings = max(len(lst) for lst in all_learnings)
-    plt.xlim(1, lenlearnings)
-
-    # --- Plot the curve for each network ---
-    for k, net in enumerate(nets):
-        ax1.plot(
-            np.arange(1, len(all_learnings[k]) + 1),
-            all_learnings[k],
-            label=net.name
-        )
-    ax1.set_xlabel("Epochs")
-    ax1.set_ylabel("Learning rate")
-    ax1.legend(loc="upper left")
-    ax1.grid(True)
-
-    plt.yscale('log', nonpositive='mask')
-    # --- Display ---
-    plt.legend()
-    plt.xlabel("Epoch")
-    plt.ylabel("Learning rate")
-    fig.canvas.draw_idle()
-    plt.tight_layout()
-    plt.ion()  # interactive mode
-    plt.show()
NeuralNetworks/UI/Losses.py
DELETED

@@ -1,45 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-from ..Dependances import plt, np
-
-def losses(*nets):
-
-    # --- Set up the figure ---
-    fig, ax1 = plt.subplots()
-    fig.set_figheight(5)
-    fig.set_figwidth(5)
-
-    # --- Set the axis limits ---
-    all_losses = [[loss for loss in net.losses] for net in nets]
-    if max(len(lst) for lst in all_losses) == 1:
-        lenlosses = 2
-    else:
-        lenlosses = max(len(lst) for lst in all_losses)
-    plt.xlim(1, lenlosses)
-
-    # --- Plot the loss curve for each network ---
-    for k, net in enumerate(nets):
-        ax1.plot(
-            np.arange(1, len(all_losses[k]) + 1),
-            all_losses[k],
-            label=net.name
-        )
-    ax1.set_xlabel("Epochs")
-    ax1.set_ylabel("Loss")
-    ax1.legend(loc="upper left")
-    ax1.grid(True)
-
-    plt.yscale('log', nonpositive='mask')
-    # --- Display ---
-    plt.legend()
-    plt.xlabel("Epoch")
-    plt.ylabel("Residuals")
-    fig.canvas.draw_idle()
-    plt.tight_layout()
-    plt.ion()  # interactive mode
-    plt.show()
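Learnings.py and Losses.py above differ only in the attribute plotted and the axis labels, and 0.2.8 introduces a shared NeuralNetworks/_UI/_plot.py (see the file list), whose body is not shown in this diff. A hedged sketch of what such a shared helper could look like; the name plot_series and its signature are assumptions, not taken from the new file:

import matplotlib.pyplot as plt
import numpy as np

def plot_series(nets, attr, ylabel):
    # Hypothetical shared log-scale epoch plot; attr is "losses" or "learnings".
    fig, ax = plt.subplots(figsize=(5, 5))
    series = [getattr(net, attr) for net in nets]
    ax.set_xlim(1, max(2, max(len(s) for s in series)))
    for net, values in zip(nets, series):
        ax.plot(np.arange(1, len(values) + 1), values, label=net.name)
    ax.set_xlabel("Epochs")
    ax.set_ylabel(ylabel)
    ax.set_yscale("log", nonpositive="mask")
    ax.legend(loc="upper left")
    ax.grid(True)
    plt.tight_layout()
    plt.show()

def losses(*nets):
    plot_series(nets, "losses", "Loss")

def learnings(*nets):
    plot_series(nets, "learnings", "Learning rate")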
NeuralNetworks/shared/module.py
DELETED

@@ -1,41 +0,0 @@
-# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-from ..Dependances import norm_list, nn, torch, device
-
-class Module (nn.Module):
-
-    def __init__ (self, name = "Net"):
-        super ().__init__ ()
-
-        # --- Attributes ---
-        self.losses = []
-        self.learnings = []
-        self.name = name
-
-    def _forward (self,x):
-        raise Exception ("_forward is not defined in the class")
-
-    def train_forward (self,x):
-        raise Exception ("train_forward is not defined in the class")
-
-    def forward (self, x):
-
-        with torch.no_grad ():
-            x = x.unsqueeze (0) if x.dim () == 1 else x
-            self = self.to (device)
-            x = x.to (device)
-
-            output = self._forward(x).cpu ().numpy ().flatten ()
-
-            x = x.to ('cpu')
-            self = self.to ('cpu')
-
-            return output
-
-
-
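Module is the abstract base the package's networks build on: subclasses supply _forward (inference) and train_forward (gradient path), while forward wraps _forward in no_grad, a device round-trip, and a flattened NumPy return. A minimal subclass sketch against a trimmed copy of the base class; TinyNet and the CPU stand-in device are illustrative:

import torch
import torch.nn as nn

device = torch.device("cpu")  # stand-in for Dependances.device

class Module(nn.Module):
    # Trimmed copy of the deleted base class above.
    def __init__(self, name="Net"):
        super().__init__()
        self.losses, self.learnings, self.name = [], [], name

    def _forward(self, x):
        raise NotImplementedError

    def forward(self, x):
        # Inference path: no grad, device round-trip, flat NumPy output.
        with torch.no_grad():
            x = x.unsqueeze(0) if x.dim() == 1 else x
            return self._forward(x.to(device)).cpu().numpy().flatten()

class TinyNet(Module):
    def __init__(self):
        super().__init__(name="TinyNet")
        self.body = nn.Sequential(nn.Linear(2, 16), nn.Tanh(), nn.Linear(16, 1))

    def _forward(self, x):          # used by Module.forward at inference
        return self.body(x)

    def train_forward(self, x):     # gradient-carrying path used by train_f
        return self.body(x)

print(TinyNet()(torch.rand(2)).shape)  # (1,) -- 1-D input is unsqueezed first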
neuralnetworks-0.2.6.dist-info/RECORD
DELETED

@@ -1,22 +0,0 @@
-NeuralNetworks/__init__.py,sha256=DFqmUzb9LQDti9PGdZgvR-1irNeurxsVhvwPEuBj-ro,668
-NeuralNetworks/Dependances/__init__.py,sha256=qEpDbSD8cCq-E5XVisNUVf3kZOYopDnQWToyRefPgKE,1227
-NeuralNetworks/Dependances/matplot.py,sha256=elS8u6DZHYP-8mHEpYNOw3jDzhCAWTld9tm3OAD46zw,957
-NeuralNetworks/Dependances/pytorch.py,sha256=r07s7EYeSQJh0PqODaMby4RttZf8m1mUyHrM3uXm370,3287
-NeuralNetworks/MLP/FourierFeatures.py,sha256=klgRM1HK09oA2NRMDxQMjJJ-WoUd5hV1ip5hHe9rHjI,3250
-NeuralNetworks/MLP/Layers.py,sha256=WAksXsiMxaClyYTxPhlyQbwwj9qTtXs3EWCO1RqjUHY,945
-NeuralNetworks/MLP/__init__.py,sha256=zQUczdC-5xkMBZ-cOFpHJrbgJz0grmVRABEdqk7080c,2739
-NeuralNetworks/MLP/inference.py,sha256=pZja1yhkT3BUh9E2X__2LO5QuAamPHITI32rT5Q_cQo,543
-NeuralNetworks/Trainer/__init__.py,sha256=XYfwidMxpI30y8j3FUgvhtRMrrWLvszZE2EwM72tFq4,1952
-NeuralNetworks/Trainer/dynamic_learning_rate.py,sha256=1JAD-k0cjdL_71zGeeCUFOa61H4PzFITDjZ2nK0TzXU,2340
-NeuralNetworks/Trainer/sample_data.py,sha256=7waC9colb7DXU4yKMcgcCnPG3Guv-isipcgVHJPPCNE,673
-NeuralNetworks/Trainer/train.py,sha256=dpo1VDu0VNlzXPqJWGkmcY_y18Y05DTjA5wDg7AbLco,2989
-NeuralNetworks/UI/Learnings.py,sha256=4TBR5pcjyoBeL7eikNKM6xn25jnqL-mWT7hbrt9q-Gw,1418
-NeuralNetworks/UI/Losses.py,sha256=Tu5xuDiutR9a4xcZKpyWN_tzSDu3_fImEf8FbAEehio,1378
-NeuralNetworks/UI/__init__.py,sha256=L96xwQZJ-HoqqOGxaheosiDKHR3mRopuXkif--rO1J4,409
-NeuralNetworks/shared/__init__.py,sha256=LKnIP46kbzOy7O3bSALwUg7iu8_wpis0LzUVVaW20Ws,376
-NeuralNetworks/shared/module.py,sha256=Y6A1LJRFPWOhwNOjPTTyzxPVafEZMbko_fLXKtXoDYI,1196
-neuralnetworks-0.2.6.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-neuralnetworks-0.2.6.dist-info/METADATA,sha256=1p55SI9TCwSSkj6N1bR0AgXOWA5fFKUYs1ZPlvSUx40,18349
-neuralnetworks-0.2.6.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-neuralnetworks-0.2.6.dist-info/top_level.txt,sha256=h18nmC1BX7avyAAwKh0OQWezxgXmOpmVtbFq-8Mcbms,15
-neuralnetworks-0.2.6.dist-info/RECORD,,
neuralnetworks-0.2.6.dist-info/licenses/LICENSE → neuralnetworks-0.2.8.dist-info/licenses/LICENSE
File without changes

neuralnetworks-0.2.6.dist-info/top_level.txt → neuralnetworks-0.2.8.dist-info/top_level.txt
File without changes