NeuralNetworks 0.2.5__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- NeuralNetworks/MLP/MLP.py +64 -0
- NeuralNetworks/MLP/_MLP_tools.py +74 -0
- NeuralNetworks/MLP/__init__.py +2 -93
- NeuralNetworks/Trainer/Trainer.py +98 -0
- NeuralNetworks/Trainer/_Trainer_tools.py +155 -0
- NeuralNetworks/Trainer/__init__.py +2 -45
- NeuralNetworks/VAE/VAE.py +114 -0
- NeuralNetworks/VAE/_VAE_tools.py +6 -0
- NeuralNetworks/VAE/__init__.py +8 -0
- NeuralNetworks/{Dependances/matplot.py → _Dependances/__init__.py} +12 -12
- NeuralNetworks/_Dependances/pytorch.py +125 -0
- NeuralNetworks/_Dependances/tools.py +15 -0
- NeuralNetworks/_UI/Learnings.py +31 -0
- NeuralNetworks/_UI/Losses.py +48 -0
- NeuralNetworks/{UI → _UI}/__init__.py +1 -1
- NeuralNetworks/_UI/_plot.py +50 -0
- NeuralNetworks/__init__.py +6 -7
- NeuralNetworks/_shared/__init__.py +8 -0
- NeuralNetworks/_shared/module.py +115 -0
- {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/METADATA +8 -15
- neuralnetworks-0.2.8.dist-info/RECORD +24 -0
- {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/WHEEL +1 -1
- NeuralNetworks/Dependances/__init__.py +0 -75
- NeuralNetworks/Dependances/pytorch.py +0 -111
- NeuralNetworks/MLP/FourierFeatures.py +0 -89
- NeuralNetworks/MLP/Layers.py +0 -31
- NeuralNetworks/MLP/inference.py +0 -26
- NeuralNetworks/Trainer/dynamic_learning_rate.py +0 -79
- NeuralNetworks/Trainer/sample_data.py +0 -19
- NeuralNetworks/Trainer/train.py +0 -75
- NeuralNetworks/UI/Learnings.py +0 -45
- NeuralNetworks/UI/Losses.py +0 -45
- neuralnetworks-0.2.5.dist-info/RECORD +0 -20
- {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/licenses/LICENSE +0 -0
- {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/top_level.txt +0 -0
NeuralNetworks/MLP/MLP.py
ADDED
@@ -0,0 +1,64 @@
+# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+# Copyright (C) 2025 - 2026 Alexandre Brun
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+from ._MLP_tools import encode, create_layers, torch
+from .._shared import Module
+
+class MLP (Module):
+    """
+    MLP neural network with optional Fourier-type encoding.
+    """
+    def __init__ (self,
+                  input_size         : int  = 1     ,  # Input dimension
+                  output_size        : int  = 1     ,  # Output dimension
+                  hidden_layers      : list = [1]   ,  # Hidden layer sizes
+                  sigmas             : list = None  ,  # Frequency distribution
+                  fourier_input_size : int  = 1     ,  # Input dimension for the Fourier encoding
+                  nb_fourier         : int  = 8     ,  # Number of Fourier components
+                  norm               : str  = "Relu",  # Activation / normalization function
+                  name               : str  = "Net"):  # Model name
+
+        """ Object reconstruction data """
+        super ().__init__ (name,
+                           input_size         = input_size,
+                           output_size        = output_size,
+                           hidden_layers      = hidden_layers,
+                           sigmas             = 0 if sigmas is None else sigmas,
+                           fourier_input_size = fourier_input_size,
+                           nb_fourier         = nb_fourier,
+                           norm               = norm,
+                           name               = name)
+
+        self.encodings, self.f = encode (
+            input_size         = input_size,          # Input adaptation layer
+            output_size        = output_size,         # Output adaptation layer
+            sigmas             = sigmas,              # Frequency distribution
+            fourier_input_size = fourier_input_size,  # Encodes the first inputs
+            nb_fourier         = nb_fourier           # Assigns the frequencies
+        )
+        self.model = create_layers (
+            input_size         = input_size,          # Creates an input layer
+            output_size        = output_size,         # Creates an output layer
+            hidden_layers      = hidden_layers,       # Creates intermediate layers
+            sigmas             = sigmas,              # Frequency distribution
+            fourier_input_size = fourier_input_size,  # Fourier encoding dimension
+            nb_fourier         = nb_fourier,          # Number of Fourier components
+            norm               = norm                 # Activation function
+        )
+    def _forward (self, x : torch.Tensor):
+        """
+        Internal forward pass with concatenation of the encodings.
+        """
+        results_list = [self.model (encoding (x)) for encoding in self.encodings]
+        return self.f (torch.cat (results_list, dim = 1))
+
+    @property
+    def _dummy_input(self):
+        """
+        Input data for exporting to .onnx
+        """
+        return torch.randn(1, self.Reconstruction_data["input_size"])
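
For orientation, a minimal usage sketch of the new MLP class (not part of the diff): the hyperparameters are illustrative, and it assumes the _shared.Module base class, which this diff does not show, routes calls to _forward.

# Usage sketch (illustrative, not part of the package).
import torch
from NeuralNetworks.MLP import MLP

net = MLP(input_size         = 2,           # two raw input features
          output_size        = 1,
          hidden_layers      = [64, 64],
          sigmas             = [1.0, 10.0], # one FourierEncoding per sigma
          fourier_input_size = 2,           # encode both inputs
          nb_fourier         = 8,           # 2*8 Fourier features per encoding
          norm               = "Relu")

x = torch.rand(16, 2)
y = net(x)        # assumes the _shared.Module base dispatches to _forward
print(y.shape)    # expected: torch.Size([16, 1])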
NeuralNetworks/MLP/_MLP_tools.py
ADDED
@@ -0,0 +1,74 @@
+# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+# Copyright (C) 2025 - 2026 Alexandre Brun
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+from .._Dependances import torch, nn, np, pi2, norms, device
+
+def create_layers(
+        input_size         : int ,
+        output_size        : int ,
+        hidden_layers      : list,
+        sigmas             : list,
+        fourier_input_size : int ,
+        nb_fourier         : int ,
+        norm               : str):
+
+    if fourier_input_size > input_size:
+        raise Exception ("fourier_input_size > input_size impossible")
+    if sigmas is None or isinstance(sigmas, int) or (
+            isinstance(sigmas, np.ndarray) and sigmas.ndim == 0):
+        layer_list = [
+            nn.Linear (input_size, hidden_layers [0]),
+            norms.get (norm)
+        ]
+    else:
+        layer_list = [
+            nn.Linear (2*nb_fourier + input_size-fourier_input_size, hidden_layers [0]),
+            norms.get (norm)
+        ]
+
+    for k in range (len (hidden_layers) - 1):
+        layer_list.extend ([
+            nn.Linear (hidden_layers [k], hidden_layers [k + 1]),
+            norms.get (norm)
+        ])
+    layer_list.append (nn.Linear (hidden_layers [-1], output_size))
+
+    return nn.Sequential (*layer_list)
+
+class FourierEncoding (nn.Module):
+
+    def __init__ (self,
+                  nb_fourier         : int ,
+                  fourier_input_size : int ,
+                  sigma              : float):
+        super ().__init__ ()
+
+        self.B = nn.Parameter (torch.randn (nb_fourier, fourier_input_size) * sigma)
+        self.size = fourier_input_size
+
+    def forward (self, x : torch.Tensor):
+        x_fourier, x_rest = x.split([self.size, x.shape[-1] - self.size], dim=-1)
+        vp = pi2 * x_fourier @ self.B.T
+        return torch.cat ((torch.cos(vp), torch.sin(vp), x_rest), dim = -1)
+
+def encode (
+        input_size         : int ,
+        output_size        : int ,
+        sigmas             : list,
+        fourier_input_size : int ,
+        nb_fourier         : int):
+
+    if sigmas is None or isinstance(sigmas, int) or (
+            isinstance(sigmas, np.ndarray) and sigmas.ndim == 0):
+        return nn.ModuleList ([nn.Identity ()]), nn.Identity ()
+
+    size = fourier_input_size
+
+    return (
+        nn.ModuleList ([FourierEncoding (nb_fourier, size, sigma) for sigma in sigmas]),
+        nn.Linear (len (sigmas) * output_size, output_size)
+    )
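
FourierEncoding is the usual random Fourier features construction: the first fourier_input_size inputs are projected through a random frequency matrix B drawn from N(0, sigma^2) (here registered as a trainable nn.Parameter), giving gamma(x) = [cos(2*pi*B*x), sin(2*pi*B*x)] concatenated with the untouched remaining inputs. A standalone sketch of that mapping (not part of the diff; it assumes the package's pi2 constant equals 2*pi):

# Standalone sketch of the Fourier feature mapping computed by FourierEncoding.
# Assumption: pi2 in _Dependances is 2*pi.
import math
import torch

torch.manual_seed(0)
nb_fourier, fourier_input_size, sigma = 8, 2, 10.0
B = torch.randn(nb_fourier, fourier_input_size) * sigma  # random frequency matrix

x = torch.rand(4, 3)  # the last column bypasses the encoding
x_fourier, x_rest = x.split([fourier_input_size, x.shape[-1] - fourier_input_size], dim=-1)
vp = 2 * math.pi * x_fourier @ B.T
features = torch.cat((torch.cos(vp), torch.sin(vp), x_rest), dim=-1)
print(features.shape)  # torch.Size([4, 17]) == 2*nb_fourier + (3 - fourier_input_size)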
NeuralNetworks/MLP/__init__.py
CHANGED
@@ -1,99 +1,8 @@
 # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
+# Copyright (C) 2025 - 2026 Alexandre Brun
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 
-from
-from .FourierFeatures import encode
-from .Layers import create_layers
-from .inference import infer
-
-class MLP (nn.Module):
-    """
-    Multi-Layer Perceptron with optional Fourier encoding.
-
-    This class implements a configurable MLP that can integrate one or
-    more Fourier encodings at the input, in order to improve its
-    representation capacity on high-frequency signals.
-
-    Parameters
-    ----------
-    input_size : int, optional
-        Dimension of the network inputs. Default: 1.
-    output_size : int, optional
-        Dimension of the network outputs. Default: 1.
-    hidden_layers : list[int], optional
-        List of hidden layer sizes. Default: [1].
-    sigmas : list[float] or None, optional
-        Sigma parameters for the Fourier encodings. If `None`,
-        no Fourier encoding is used.
-    fourier_input_size : int, optional
-        WIP
-    nb_fourier : int, optional
-        Number of Fourier components per encoding. Default: 8.
-    norm : str, optional
-        Name of the activation function to use. Default: "Relu".
-    name : str, optional
-        Name of the network. Default: "Net".
-    """
-
-    def __init__ (
-            self,
-            input_size = 1,
-            output_size = 1,
-            hidden_layers = [1],
-            sigmas = None,
-            fourier_input_size = 2,
-            nb_fourier = 8,
-            norm = "Relu",
-            name = "Net"):
-        super ().__init__ ()
-
-        # --- Activation ---
-        self.norm = norm_list.get (norm)
-        if self.norm is None:
-            print (f"Warning: '{norm}' not recognized, falling back to 'Relu'")
-            self.norm = norm_list.get ("Relu")
-
-        # --- Attributes ---
-        self.losses = []
-        self.learnings = []
-        self.name = name
-
-        ## --- Fourier encoding or passthrough ---
-        self.encodings, self.f = encode (
-            input_size,
-            output_size,
-            sigmas,
-            fourier_input_size,
-            nb_fourier
-        )
-
-        # --- Network construction ---
-        self.model = create_layers (
-            input_size,
-            output_size,
-            hidden_layers,
-            sigmas,
-            fourier_input_size,
-            nb_fourier,
-            self.norm
-        )
-
-    def forward (self, x):
-        """
-        Performs a forward pass of the network.
-
-        Parameters
-        ----------
-        x : torch.Tensor
-            Network inputs of shape `(N, input_size)`.
-
-        Returns
-        -------
-        torch.Tensor
-            Output of the MLP of shape `(N, output_size)`.
-        """
-        return infer (self, x)
+from .MLP import MLP
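
Both __init__ rewrites in this release follow the same pattern: the implementation moves to a sibling module and the package re-exports it, so the public import paths should be unaffected. A quick check (not part of the diff):

# The refactor keeps the public import paths stable (sketch).
from NeuralNetworks.MLP import MLP          # now re-exported via `from .MLP import MLP`
from NeuralNetworks.Trainer import Trainer  # same pattern, see the Trainer hunks below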
NeuralNetworks/Trainer/Trainer.py
ADDED
@@ -0,0 +1,98 @@
+# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+# Copyright (C) 2025 - 2026 Alexandre Brun
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+from ._Trainer_tools import device
+from ._Trainer_tools import init_Trainer, init_train
+from ._Trainer_tools import epoch_logic, update_lr, update_trakers
+from ._Trainer_tools import torch, trange
+
+from .._shared import Module
+
+class Trainer:
+    """
+    Model training management object.
+    """
+    def __init__(self,
+                 *nets            : Module,         # Models to train
+                 inputs           : torch.Tensor,   # Input data
+                 outputs          : torch.Tensor,   # Output data
+                 init_train_size  : float = 0.01,   # Initial dataset fraction
+                 final_train_size : float = 1.0,    # Final dataset fraction
+                 optim            : str   = 'Adam', # Optimizer name
+                 init_lr          : float = 1e-3,   # Initial learning rate
+                 final_lr         : float = 1e-5,   # Final learning rate
+                 crit             : str   = 'MSE',  # Loss function
+                 batch_size       : int   = 1024):  # Batch size
+        """
+        Initializes the trainer.
+        """
+        self.nets, self.batch_size = nets, batch_size
+        self.init_train_size, self.final_train_size = init_train_size, final_train_size
+        self.init_lr, self.final_lr = init_lr, final_lr
+        self.inputs, self.outputs = inputs, outputs
+        self.crit, self.optim_list, self.name = init_Trainer (
+            nets       = nets,        # Binds the models
+            crit       = crit,        # Criteria
+            optim      = optim,       # Optimizers
+            init_lr    = init_lr,     # Added to the name
+            batch_size = batch_size   # Added to the name
+        )
+    def train (self,
+               num_epochs   : int  = 1500,   # Number of epochs
+               disable_tqdm : bool = False,  # Disables the progress bar
+               benchmark    : bool = False): # Benchmark mode
+        """
+        Runs the training of the models.
+        """
+        outputs_size = self.outputs.size () [1]
+        self.inputs, self.outputs, train_losses, train_lrs, n_samples = init_train (
+            inputs           = self.inputs,            # Sent to the device
+            outputs          = self.outputs,           # Sent to the device
+            init_train_size  = self.init_train_size,   # Initial dataset fraction
+            final_train_size = self.final_train_size,  # Final dataset fraction
+            num_epochs       = num_epochs,             # Used for the learning rates
+            benchmark        = benchmark               # Enables benchmark mode
+        )
+        for k, net in enumerate (self.nets):
+            net = net.to (device)  # Sends the network to the device
+            net.train ()
+            for epoch in trange (num_epochs,                    # Number of epochs to run
+                                 desc = f"Training {net.name}", # Display parameter
+                                 unit = "epoch",                # Display parameter
+                                 disable = disable_tqdm):       # Display parameter
+                epoch_logic (
+                    net          = net,                 # Current network
+                    epoch        = epoch,               # Current epoch
+                    train_losses = train_losses,        # Training residuals
+                    n_samples    = n_samples [epoch],   # Number of samples this epoch
+                    inputs       = self.inputs,         # Input data
+                    outputs      = self.outputs,        # Output data
+                    outputs_size = outputs_size,        # Number of outputs
+                    batch_size   = self.batch_size,     # Batch size
+                    optim        = self.optim_list [k], # Gradient computation
+                    crit         = self.crit            # Residual computation
+                )
+                update_lr (
+                    init_lr      = self.init_lr,        # Initial learning rate
+                    final_lr     = self.final_lr,       # Final learning rate
+                    optim        = self.optim_list [k], # Updates the learning rate
+                    outputs_size = outputs_size,        # Number of outputs
+                    train_losses = train_losses,        # Training residuals
+                    train_lrs    = train_lrs,           # Training learning rates
+                    epoch        = epoch                # Current epoch
+                )
+            net = net.to (torch.device ("cpu"))  # Sends the network back to the cpu
+            update_trakers (
+                net          = net,           # Current network
+                train_losses = train_losses,  # Updates the loss history
+                train_lrs    = train_lrs)     # Updates the lr history
+            net.eval ()
+        self.inputs = self.inputs.to (torch.device ("cpu"))    # Back to the cpu
+        self.outputs = self.outputs.to (torch.device ("cpu"))  # Back to the cpu
+        torch.cuda.empty_cache ()  # Empties the cache
+    def __repr__ (self):
+        return self.name
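
A minimal end-to-end training sketch against the Trainer signature above (not part of the diff): the dataset is synthetic, and it assumes the _shared.Module base provides the train_forward method that _Trainer_tools.epoch_logic calls.

# End-to-end sketch (illustrative): fit one MLP on a synthetic regression task.
import torch
from NeuralNetworks.MLP import MLP
from NeuralNetworks.Trainer import Trainer

inputs  = torch.rand(10_000, 2)
outputs = torch.sin(6 * inputs).sum(dim=1, keepdim=True)  # synthetic target

net     = MLP(input_size=2, output_size=1, hidden_layers=[64, 64], name="demo")
trainer = Trainer(net,
                  inputs     = inputs,
                  outputs    = outputs,
                  optim      = 'Adam',
                  init_lr    = 1e-3,
                  final_lr   = 1e-5,
                  crit       = 'MSE',
                  batch_size = 1024)
trainer.train(num_epochs=200)
print(trainer)  # __repr__ prints the optim / crit / init_lr / batch_size summary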
NeuralNetworks/Trainer/_Trainer_tools.py
ADDED
@@ -0,0 +1,155 @@
+# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+# Copyright (C) 2025 - 2026 Alexandre Brun
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+from .._Dependances import torch, trange, scaler, autocast
+from .._Dependances import device, dev, crits, optims
+from .._shared import Module
+
+def epoch_logic (
+        net          : Module,                  # Current network
+        epoch        : int,                     # Current epoch
+        train_losses : torch.Tensor,            # Training residuals
+        n_samples    : int,                     # Number of samples to draw
+        inputs       : torch.Tensor,            # Input data
+        outputs      : torch.Tensor,            # Output data
+        outputs_size : int,                     # Number of outputs
+        batch_size   : int,                     # Batch size
+        optim        : torch.optim,             # Optimizer used
+        crit         : torch.nn.modules.loss):  # Loss criterion used
+    """
+    Performs one training epoch over mini-batches.
+    """
+
+    perm = torch.randperm (n_samples, device=device, requires_grad=False)
+
+    for i in range (0, n_samples, batch_size):
+        idx = perm [i : i + batch_size]
+        optim.zero_grad (set_to_none = True)
+
+        with autocast (dev):
+            all_loss = crit( net.train_forward(inputs[idx]), outputs[idx] ).mean(dim=0)
+
+        scaler.scale ( all_loss.mean() ).backward ()
+        scaler.step (optim)
+        scaler.update ()
+
+        train_losses[epoch].add_(all_loss.detach())
+
+def generate_learning_rate (Nb_iter : int):
+    """
+    Generates a smooth learning rate curve.
+    """
+
+    infl = int (0.1 * Nb_iter)
+
+    lr_curve = torch.empty(Nb_iter, device=device)
+
+    t = torch.linspace(0.0, 1.0, infl, device=device, requires_grad=False)
+    t4 = t*t; t3 = t4*t; t4.mul_(t4); t5 = t4*t
+    lr_curve[:infl] = 1 - 0.5 * (6*t5 - 15*t4 + 10*t3)
+
+    t = torch.linspace(0.0, 1.0, Nb_iter - infl, device=device, requires_grad=False)
+    t4 = t*t; t3 = t4*t; t4.mul_(t4); t5 = t4*t
+    lr_curve[infl:] = 0.5 * (1 - 6*t5 - 15*t4 + 10*t3)
+
+    return lr_curve
+
+def update_lr (
+        init_lr      : float,         # Initial learning rate
+        final_lr     : float,         # Final learning rate
+        optim        : torch.optim,   # Optimizer used
+        outputs_size : int,           # Number of outputs
+        train_losses : torch.Tensor,  # Latest residuals
+        train_lrs    : torch.Tensor,  # Learning rates
+        epoch        : int):          # Current epoch
+    """
+    Computes an adaptive learning rate based on the recent losses.
+    """
+
+    if epoch >= 1:
+        x = train_losses[max(0, epoch-10):epoch].min(dim=0).values.max()
+    else:
+        x = train_losses[:1].min(dim=0).values.max()
+
+    y, u9 = x.clone(), x.clone()
+
+    y.mul_(y); y.mul_(y)
+    u9.mul_(-2); u9.add_(1); u9.addcmul_(x, x, value=1); u9.mul_(u9); u9.mul_(u9)
+
+    y.sub_(u9); y.add_(1.0); y.mul_(0.5)
+
+    train_lrs[epoch].clamp_min_(y)
+    train_lrs[epoch].mul_(init_lr - final_lr).add_(final_lr)
+
+    for param_group in optim.param_groups:
+        param_group ['lr'] = train_lrs[epoch].item()
+
+def update_trakers (
+        net          : Module,         # Current network
+        train_losses : torch.Tensor,   # Residuals
+        train_lrs    : torch.Tensor):  # Learning rates
+    """
+    Updates the model's loss history and learning rate history.
+    """
+
+    net.losses += train_losses.cpu().tolist()
+    net.learnings += train_lrs.cpu().tolist()
+
+
+def init_train (
+        inputs           : torch.Tensor,  # Input data
+        outputs          : torch.Tensor,  # Output data
+        init_train_size  : float,         # Initial data proportion
+        final_train_size : float,         # Final data proportion
+        num_epochs       : int,           # Number of epochs
+        benchmark        : bool):         # Enables benchmark mode
+    """
+    Prepares the data and the training environment.
+    """
+
+    torch.backends.cudnn.benchmark = benchmark
+    torch.autograd.set_detect_anomaly (benchmark)
+    torch.autograd.profiler.profile (benchmark)
+    torch.use_deterministic_algorithms (benchmark)
+
+
+    n_samples = torch.linspace(
+        inputs.size(0) * init_train_size,
+        inputs.size(0) * final_train_size,
+        num_epochs, device = device, requires_grad=False
+    ).ceil().int()
+
+    inputs = inputs.to (device)
+    outputs = outputs.to (device)
+
+    train_lrs = generate_learning_rate (num_epochs)
+    train_losses = torch.zeros(
+        (num_epochs, outputs.size(1)), device=device, requires_grad=False
+    )
+
+    torch.cuda.empty_cache ()
+    return inputs, outputs, train_losses, train_lrs, n_samples
+
+def init_Trainer (
+        nets       : list,   # Models to train
+        crit       : str,    # Loss function
+        optim      : str,    # Optimizer used
+        init_lr    : float,  # Initial learning rate
+        batch_size : int):   # Batch size
+    """
+    Initializes the loss criterion and the optimizers.
+    """
+    name = f"| optim : {optim}\n"
+    name += f"| crit : {crit}\n"
+    name += f"| init_lr : {init_lr}\n"
+    name += f"| batch_size : {batch_size}"
+
+    optim_list = []
+    for net in nets:
+        param = [{"params" : net.parameters (), "lr" : init_lr}]
+        optim_list.append( optims.get (optim) (param))
+    return crits.get (crit), optim_list, name
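
Two of the helpers above are easier to read in closed form. generate_learning_rate builds both segments from the quintic smoothstep s(t) = 6t^5 - 15t^4 + 10t^3 (the `t4 = t*t; t3 = t4*t; t4.mul_(t4); t5 = t4*t` dance just computes t^3, t^4 and t^5 in place), and the in-place chain in update_lr evaluates y = 0.5 * (1 + x^4 - (1 - x)^8) for the windowed loss statistic x. A standalone sketch of the schedule shape (not part of the diff; the second segment's polynomial is reproduced exactly as written, where it is not parenthesized like the first):

# Standalone sketch of the curve produced by generate_learning_rate.
import torch

def smooth_curve(nb_iter: int) -> torch.Tensor:
    infl  = int(0.1 * nb_iter)  # inflection point at 10% of training
    curve = torch.empty(nb_iter)

    t = torch.linspace(0.0, 1.0, infl)
    curve[:infl] = 1 - 0.5 * (6 * t**5 - 15 * t**4 + 10 * t**3)  # 1.0 down to 0.5

    t = torch.linspace(0.0, 1.0, nb_iter - infl)
    curve[infl:] = 0.5 * (1 - 6 * t**5 - 15 * t**4 + 10 * t**3)  # second segment, verbatim

    return curve

c = smooth_curve(1000)
print(c[0].item(), c[99].item())  # 1.0 at the start, 0.5 at the inflection point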
NeuralNetworks/Trainer/__init__.py
CHANGED
@@ -1,51 +1,8 @@
 # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
-# Copyright (C) 2026 Alexandre Brun
+# Copyright (C) 2025 - 2026 Alexandre Brun
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 
-from
-from .train import train_f
-from .sample_data import sample_data
-
-class Trainer:
-
-    def __init__(self,
-                 *nets,
-                 inputs,
-                 outputs,
-                 test_size = None,
-                 optim = 'Adam',
-                 init_lr = 0.01,
-                 crit = 'MSE',
-                 batch_size = float):
-
-        self.batch_size = batch_size
-        self.nets = nets
-        self.init_lr = init_lr
-
-        self.X_train, self.X_test, self.y_train, self.y_test = sample_data (
-            inputs,
-            outputs,
-            test_size
-        )
-
-        # --- Loss function ---
-        self.crit = crit_list.get(crit)
-        if self.crit is None:
-            print(f"Warning: '{self.crit}' not recognized, falling back to 'MSE'")
-            self.crit = crit_list.get("MSE")
-
-        # --- Optimizer selection ---
-        self.optims = []
-        for net in nets:
-            params = [{"params": net.parameters(), "lr": self.init_lr}]
-            new_optim = optim_list(params).get(optim)
-            if new_optim is None:
-                print(f"Warning: '{optim}' not recognized, falling back to 'Adam'")
-                new_optim = optim_list(params).get("Adam")
-            self.optims.append(new_optim)
-
-    def train (self, num_epochs = 1500, activate_tqdm = True):
-        train_f (self, num_epochs, activate_tqdm)
+from .Trainer import Trainer
NeuralNetworks/VAE/VAE.py
ADDED
@@ -0,0 +1,114 @@
+# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+# Copyright (C) 2025 - 2026 Alexandre Brun
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+from .._Dependances import *
+from .._shared import Module
+
+class VAE(Module):
+    def __init__(self,
+                 imsize          : int,
+                 latentsize      : int,
+                 labelsize       : int,
+                 channels        : list = [16, 32, 16, 8],
+                 linear_channels : list = [100],
+                 name            : str  = "encoder",
+                 norm            : str  = "Relu",
+                 norm_cc         : str  = "Relu"):
+
+        super().__init__(name,
+                         imsize          = imsize,
+                         latentsize      = latentsize,
+                         labelsize       = labelsize,
+                         channels        = channels,
+                         linear_channels = linear_channels,
+                         name            = name,
+                         norm            = norm,
+                         norm_cc         = norm_cc)
+
+        self.imsize = imsize
+        self.latentsize = latentsize
+        self.labelsize = labelsize
+
+        # Start latent conv channels
+        if norm is None:
+            self.norm = nn.Identity()
+        else:
+            self.norm = norms.get(norm)
+
+        if norm_cc is None:
+            self.norm_cc = nn.Identity()
+        else:
+            self.norm_cc = norms.get(norm_cc)
+
+        # ----- Encoder -----
+        Elayers = []
+        in_ch = 1  # grayscale input
+        for out_ch in channels:
+            Elayers.append(nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1))
+            Elayers.append(self.norm_cc)
+            Elayers.append(nn.MaxPool2d(kernel_size=3, stride=3, padding=1))
+            in_ch = out_ch
+
+        # Compute final flattened size dynamically
+        with torch.no_grad():
+            dummy = torch.zeros(1, 1, imsize, imsize)
+            for layer in Elayers:
+                dummy = layer(dummy)
+            flat_dim = dummy.numel()
+            del dummy
+
+        Elayers.append(nn.Flatten())
+        Elayers.append(nn.Linear(flat_dim, linear_channels[0]))
+        Elayers.append(self.norm)
+        Elayers.append(nn.Linear(linear_channels[0], latentsize))
+        Elayers.append(self.norm)
+        self.Emodel = nn.Sequential(*Elayers)
+
+        # ----- Decoder -----
+        Dlayers = []
+        Dlayers.append(nn.Linear(latentsize, int((labelsize+latentsize)/2)))
+        Dlayers.append(self.norm)
+        Dlayers.append(nn.Linear(int((labelsize+latentsize)/2), int((labelsize+latentsize))))
+        Dlayers.append(self.norm)
+        Dlayers.append(nn.Linear(int((labelsize+latentsize)), int((labelsize+latentsize))))
+        Dlayers.append(self.norm)
+        Dlayers.append(nn.Linear(int((labelsize+latentsize)), labelsize))
+        Dlayers.append(self.norm)
+
+        self.Dmodel = nn.Sequential(*Dlayers)
+
+    def encode(self, inputs):
+        image = np.array(inputs)
+        inputs = tensorise(inputs).to(device)
+
+        if image.ndim == 4:
+            x = inputs
+        elif inputs.ndim == 3:  # [H, W, C]? Or [C, H, W]?
+            x = inputs.unsqueeze(0)  # -> [1, C, H, W]
+        elif image.ndim == 2:  # [H, W]
+            x = inputs.unsqueeze(0).unsqueeze(0)  # -> [1, 1, H, W]
+        inputs = inputs.to('cpu')
+
+        self.Emodel = self.Emodel.to(device)
+        output = self.Emodel(x).flatten()
+        self.Emodel = self.Emodel.to('cpu')
+        return output
+
+    def decode(self, vector):
+        vector = tensorise(vector).to(device)
+        x = vector.view(1, 1, 1, self.latentsize)  # batch=1, channels=1, h=1, w=latentsize
+        vector = tensorise(vector).to('cpu')
+        self.Dmodel = self.Dmodel.to(device)
+        output = self.Dmodel(x).cpu().detach().numpy()[0][0]
+        self.Dmodel = self.Dmodel.to('cpu')
+        return output
+
+    def _forward (self, x):
+        return self.Dmodel(x)
+
+    def train_forward (self, x):
+        return self.Dmodel(self.Emodel(x))
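
A minimal usage sketch for the VAE class above (not part of the diff): the sizes are illustrative, and tensorise, norms, and device are assumed to be provided by the _Dependances star import.

# Usage sketch (illustrative, not part of the package).
import numpy as np
from NeuralNetworks.VAE import VAE

vae = VAE(imsize=27, latentsize=8, labelsize=10)

image = np.random.rand(27, 27).astype(np.float32)  # grayscale [H, W] input
z     = vae.encode(image)   # flattened latent vector of length latentsize
label = vae.decode(z)       # decoded label scores as a numpy array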
NeuralNetworks/VAE/_VAE_tools.py
ADDED
@@ -0,0 +1,6 @@
+# NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+# Copyright (C) 2025 - 2026 Alexandre Brun
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
NeuralNetworks/VAE/__init__.py
ADDED
@@ -0,0 +1,8 @@
+# NeuralNetworksBeta - Multi-Layer Perceptrons with Fourier encoding
+# Copyright (C) 2025 - 2026 Alexandre Brun
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+from .VAE import VAE