NeuralNetworks 0.2.5.tar.gz → 0.2.8.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/PKG-INFO +8 -15
  2. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/pyproject.toml +7 -14
  3. neuralnetworks-0.2.8/src/NeuralNetworks/MLP/MLP.py +64 -0
  4. neuralnetworks-0.2.8/src/NeuralNetworks/MLP/_MLP_tools.py +74 -0
  5. neuralnetworks-0.2.8/src/NeuralNetworks/MLP/__init__.py +8 -0
  6. neuralnetworks-0.2.8/src/NeuralNetworks/Trainer/Trainer.py +98 -0
  7. neuralnetworks-0.2.8/src/NeuralNetworks/Trainer/_Trainer_tools.py +155 -0
  8. neuralnetworks-0.2.8/src/NeuralNetworks/Trainer/__init__.py +8 -0
  9. neuralnetworks-0.2.8/src/NeuralNetworks/VAE/VAE.py +114 -0
  10. neuralnetworks-0.2.8/src/NeuralNetworks/VAE/_VAE_tools.py +6 -0
  11. neuralnetworks-0.2.8/src/NeuralNetworks/VAE/__init__.py +8 -0
  12. neuralnetworks-0.2.5/src/NeuralNetworks/Dependances/matplot.py → neuralnetworks-0.2.8/src/NeuralNetworks/_Dependances/__init__.py +12 -12
  13. neuralnetworks-0.2.8/src/NeuralNetworks/_Dependances/pytorch.py +125 -0
  14. neuralnetworks-0.2.8/src/NeuralNetworks/_Dependances/tools.py +15 -0
  15. neuralnetworks-0.2.8/src/NeuralNetworks/_UI/Learnings.py +31 -0
  16. neuralnetworks-0.2.8/src/NeuralNetworks/_UI/Losses.py +48 -0
  17. {neuralnetworks-0.2.5/src/NeuralNetworks/UI → neuralnetworks-0.2.8/src/NeuralNetworks/_UI}/__init__.py +1 -1
  18. neuralnetworks-0.2.8/src/NeuralNetworks/_UI/_plot.py +50 -0
  19. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/src/NeuralNetworks/__init__.py +6 -7
  20. neuralnetworks-0.2.8/src/NeuralNetworks/_shared/__init__.py +8 -0
  21. neuralnetworks-0.2.8/src/NeuralNetworks/_shared/module.py +115 -0
  22. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/src/NeuralNetworks.egg-info/PKG-INFO +8 -15
  23. neuralnetworks-0.2.8/src/NeuralNetworks.egg-info/SOURCES.txt +27 -0
  24. neuralnetworks-0.2.8/src/NeuralNetworks.egg-info/requires.txt +7 -0
  25. neuralnetworks-0.2.5/src/NeuralNetworks/Dependances/__init__.py +0 -75
  26. neuralnetworks-0.2.5/src/NeuralNetworks/Dependances/pytorch.py +0 -111
  27. neuralnetworks-0.2.5/src/NeuralNetworks/MLP/FourierFeatures.py +0 -89
  28. neuralnetworks-0.2.5/src/NeuralNetworks/MLP/Layers.py +0 -31
  29. neuralnetworks-0.2.5/src/NeuralNetworks/MLP/__init__.py +0 -99
  30. neuralnetworks-0.2.5/src/NeuralNetworks/MLP/inference.py +0 -26
  31. neuralnetworks-0.2.5/src/NeuralNetworks/Trainer/__init__.py +0 -51
  32. neuralnetworks-0.2.5/src/NeuralNetworks/Trainer/dynamic_learning_rate.py +0 -79
  33. neuralnetworks-0.2.5/src/NeuralNetworks/Trainer/sample_data.py +0 -19
  34. neuralnetworks-0.2.5/src/NeuralNetworks/Trainer/train.py +0 -75
  35. neuralnetworks-0.2.5/src/NeuralNetworks/UI/Learnings.py +0 -45
  36. neuralnetworks-0.2.5/src/NeuralNetworks/UI/Losses.py +0 -45
  37. neuralnetworks-0.2.5/src/NeuralNetworks.egg-info/SOURCES.txt +0 -23
  38. neuralnetworks-0.2.5/src/NeuralNetworks.egg-info/requires.txt +0 -14
  39. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/LICENSE +0 -0
  40. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/README.md +0 -0
  41. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/setup.cfg +0 -0
  42. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/src/NeuralNetworks.egg-info/dependency_links.txt +0 -0
  43. {neuralnetworks-0.2.5 → neuralnetworks-0.2.8}/src/NeuralNetworks.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: NeuralNetworks
- Version: 0.2.5
+ Version: 0.2.8
  Summary: Multi-Layer Perceptrons with multi-Fourier encoding, variable learning rate, visualization and PyTorch compilation
  Author-email: Alexandre Brun <alexandre51160@gmail.com>
  License: GPL-3.0-or-later
@@ -12,20 +12,13 @@ Classifier: Operating System :: OS Independent
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: numpy>=1.25
- Requires-Dist: matplotlib>=3.10
- Requires-Dist: tqdm>=4.66
- Requires-Dist: torch<3.0,>=2.9.1
- Requires-Dist: torchvision<1.0,>=0.24
- Requires-Dist: torchaudio<3.0,>=2.9
- Requires-Dist: torchmetrics>=1.8
- Requires-Dist: visualtorch>=0.2
- Requires-Dist: random-fourier-features-pytorch>=1.0
- Requires-Dist: IPython>=8.16
- Requires-Dist: requests
- Requires-Dist: airfrans
- Requires-Dist: scipy
- Requires-Dist: pandas
+ Requires-Dist: torch
+ Requires-Dist: numpy
+ Requires-Dist: matplotlib
+ Requires-Dist: onnx
+ Requires-Dist: onnx-simplifier
+ Requires-Dist: tqdm
+ Requires-Dist: visualtorch
  Dynamic: license-file

  # NeuralNetworks Module
@@ -19,20 +19,13 @@ classifiers = [
  ]

  dependencies = [
- "numpy>=1.25",
- "matplotlib>=3.10",
- "tqdm>=4.66",
- "torch>=2.9.1,<3.0",
- "torchvision>=0.24,<1.0",
- "torchaudio>=2.9,<3.0",
- "torchmetrics>=1.8",
- "visualtorch>=0.2",
- "random-fourier-features-pytorch>=1.0",
- "IPython>=8.16",
- "requests",
- "airfrans",
- "scipy",
- "pandas"
+ "torch",
+ "numpy",
+ "matplotlib",
+ "onnx",
+ "onnx-simplifier",
+ "tqdm",
+ "visualtorch"
  ]

  [project.urls]
@@ -0,0 +1,64 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ._MLP_tools import encode, create_layers, torch
+ from .._shared import Module
+
+ class MLP (Module):
+     """
+     MLP neural network with optional Fourier-style encoding.
+     """
+     def __init__ (self ,
+                   input_size         : int  = 1      , # Input dimension
+                   output_size        : int  = 1      , # Output dimension
+                   hidden_layers      : list = [1]    , # Hidden layer sizes
+                   sigmas             : list = None   , # Frequency distribution
+                   fourier_input_size : int  = 1      , # Input dimension for the Fourier encoding
+                   nb_fourier         : int  = 8      , # Number of Fourier components
+                   norm               : str  = "Relu" , # Activation / normalization function
+                   name               : str  = "Net" ): # Model name
+
+         """ Object reconstruction data """
+         super ().__init__ (name ,
+                            input_size         = input_size ,
+                            output_size        = output_size ,
+                            hidden_layers      = hidden_layers ,
+                            sigmas             = 0 if sigmas is None else sigmas,
+                            fourier_input_size = fourier_input_size ,
+                            nb_fourier         = nb_fourier ,
+                            norm               = norm ,
+                            name               = name )
+
+         self.encodings, self.f = encode (
+             input_size         = input_size ,          # Input adaptation layer
+             output_size        = output_size ,         # Output adaptation layer
+             sigmas             = sigmas ,              # Frequency distribution
+             fourier_input_size = fourier_input_size,   # Encodes the first inputs
+             nb_fourier         = nb_fourier            # Assigns the frequencies
+         )
+         self.model = create_layers (
+             input_size         = input_size ,          # Creates an input layer
+             output_size        = output_size ,         # Creates an output layer
+             hidden_layers      = hidden_layers ,       # Creates the intermediate layers
+             sigmas             = sigmas ,              # Frequency distribution
+             fourier_input_size = fourier_input_size,   # Fourier encoding dimension
+             nb_fourier         = nb_fourier ,          # Number of Fourier components
+             norm               = norm                  # Activation function
+         )
+     def _forward (self, x : torch.Tensor):
+         """
+         Internal forward pass with concatenation of the encodings.
+         """
+         results_list = [self.model (encoding (x)) for encoding in self.encodings]
+         return self.f (torch.cat (results_list, dim = 1))
+
+     @property
+     def _dummy_input(self):
+         """
+         Input data for the .onnx export
+         """
+         return torch.randn(1, self.Reconstruction_data["input_size"])
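The hunk above appears to be the new src/NeuralNetworks/MLP/MLP.py (64 added lines, matching entry 3 in the file list). A minimal usage sketch follows; it assumes that the _shared.Module base class (entry 21, not shown in this section) routes net(x) calls to _forward, and every numeric value is illustrative rather than taken from the package documentation:

    # Hypothetical sketch -- argument values are illustrative, not from the package docs.
    import torch
    from NeuralNetworks.MLP import MLP      # re-export shown in the MLP/__init__.py hunk below

    net = MLP(input_size         = 3,            # 3 input features
              output_size        = 1,            # 1 output
              hidden_layers      = [64, 64],     # two hidden layers
              sigmas             = [1.0, 10.0],  # two Fourier frequency scales
              fourier_input_size = 2,            # Fourier-encode the first 2 inputs
              nb_fourier         = 8)            # 8 Fourier components per scale

    x = torch.randn(16, 3)                       # batch of 16 samples
    y = net(x)                                   # assumed to dispatch to MLP._forward via Module; shape (16, 1)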
@@ -0,0 +1,74 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from .._Dependances import torch, nn, np, pi2, norms, device
+
+ def create_layers(
+         input_size         : int ,
+         output_size        : int ,
+         hidden_layers      : int ,
+         sigmas             : list,
+         fourier_input_size : int ,
+         nb_fourier         : int ,
+         norm               : str):
+
+     if fourier_input_size > input_size:
+         raise Exception ("fourier_input_size > input_size impossible")
+     if sigmas is None or isinstance(sigmas, int) or (
+             isinstance(sigmas, np.ndarray) and sigmas.ndim == 0):
+         layer_list = [
+             nn.Linear (input_size, hidden_layers [0]),
+             norms.get (norm)
+         ]
+     else:
+         layer_list = [
+             nn.Linear (2*nb_fourier + input_size-fourier_input_size, hidden_layers [0]),
+             norms.get (norm)
+         ]
+
+     for k in range (len (hidden_layers) - 1):
+         layer_list.extend ([
+             nn.Linear (hidden_layers [k], hidden_layers [k + 1]),
+             norms.get (norm)
+         ])
+     layer_list.append (nn.Linear (hidden_layers [-1], output_size))
+
+     return nn.Sequential (*layer_list)
+
+ class FourierEncoding (nn.Module):
+
+     def __init__ (self,
+                   nb_fourier         : int ,
+                   fourier_input_size : int ,
+                   sigma              : float):
+         super ().__init__ ()
+
+         self.B = nn.Parameter (torch.randn (nb_fourier, fourier_input_size) * sigma)
+         self.size = fourier_input_size
+
+     def forward (self, x : torch.Tensor):
+         x_fourier, x_rest = x.split([self.size, x.shape[-1] - self.size], dim=-1)
+         vp = pi2 * x_fourier @ self.B.T
+         return torch.cat ((torch.cos(vp), torch.sin(vp), x_rest), dim = -1)
+
+ def encode (
+         input_size         : int ,
+         output_size        : int ,
+         sigmas             : list,
+         fourier_input_size : int ,
+         nb_fourier         : int):
+
+     if sigmas is None or isinstance(sigmas, int) or (
+             isinstance(sigmas, np.ndarray) and sigmas.ndim == 0):
+         return nn.ModuleList ([nn.Identity ()]), nn.Identity ()
+
+     size = fourier_input_size
+
+     return (
+         nn.ModuleList ([FourierEncoding (nb_fourier, size, sigma) for sigma in sigmas]),
+         nn.Linear (len (sigmas) * output_size, output_size)
+     )
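This hunk appears to be src/NeuralNetworks/MLP/_MLP_tools.py (entry 4). FourierEncoding projects the first fourier_input_size features onto random frequencies and passes the remaining features through, which is why the first Linear layer above takes 2*nb_fourier + input_size - fourier_input_size inputs. A standalone sketch of that computation, assuming pi2 in _Dependances equals 2*pi (its definition is not shown in this diff):

    import math
    import torch

    nb_fourier, fourier_input_size = 8, 2
    B = torch.randn(nb_fourier, fourier_input_size)           # random frequency matrix (sigma = 1 here)
    x = torch.randn(5, 3)                                     # batch of 5; the first 2 features get encoded
    x_fourier, x_rest = x.split([2, 1], dim=-1)
    vp = 2 * math.pi * x_fourier @ B.T                        # projections, shape (5, 8)
    encoded = torch.cat((torch.cos(vp), torch.sin(vp), x_rest), dim=-1)
    print(encoded.shape)                                      # torch.Size([5, 17]) = 2*8 + 3 - 2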
@@ -0,0 +1,8 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from .MLP import MLP
@@ -0,0 +1,98 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ._Trainer_tools import device
+ from ._Trainer_tools import init_Trainer, init_train
+ from ._Trainer_tools import epoch_logic, update_lr, update_trakers
+ from ._Trainer_tools import torch, trange
+
+ from .._shared import Module
+
+ class Trainer:
+     """
+     Object managing model training.
+     """
+     def __init__(self ,
+                  *nets            : Module        , # Models to train
+                  inputs           : torch.Tensor  , # Input data
+                  outputs          : torch.Tensor  , # Output data
+                  init_train_size  : float = 0.01  , # Initial fraction of the dataset
+                  final_train_size : float = 1.0   , # Final fraction of the dataset
+                  optim            : str   = 'Adam', # Optimizer name
+                  init_lr          : float = 1e-3  , # Initial learning rate
+                  final_lr         : float = 1e-5  , # Final learning rate
+                  crit             : str   = 'MSE' , # Loss function
+                  batch_size       : int   = 1024 ): # Batch size
+         """
+         Initializes the trainer.
+         """
+         self.nets , self.batch_size = nets, batch_size
+         self.init_train_size, self.final_train_size = init_train_size, final_train_size
+         self.init_lr, self.final_lr = init_lr, final_lr
+         self.inputs , self.outputs = inputs , outputs
+         self.crit , self.optim_list, self.name = init_Trainer (
+             nets       = nets ,       # Binds the models
+             crit       = crit ,       # Criteria
+             optim      = optim ,      # Optimizers
+             init_lr    = init_lr ,    # Added to the name
+             batch_size = batch_size   # Added to the name
+         )
+     def train (self ,
+                num_epochs   : int  = 1500  , # Number of epochs
+                disable_tqdm : bool = False , # Disables the progress bar
+                benchmark    : bool = False): # Benchmark mode
+         """
+         Runs the training of the models.
+         """
+         outputs_size = self.outputs.size ( ) [1]
+         self.inputs, self.outputs, train_losses, train_lrs, n_samples = init_train (
+             inputs           = self.inputs ,           # Moved to the device
+             outputs          = self.outputs ,          # Moved to the device
+             init_train_size  = self.init_train_size ,  # Initial fraction of the dataset
+             final_train_size = self.final_train_size,  # Final fraction of the dataset
+             num_epochs       = num_epochs ,            # Used for the learning rates
+             benchmark        = benchmark               # Enables benchmark mode
+         )
+         for k, net in enumerate (self.nets):
+             net = net.to (device)                      # Moves the network to the device
+             net.train ()
+             for epoch in trange (num_epochs ,                   # Number of epochs to run
+                                  desc = f"Training {net.name}", # Display setting
+                                  unit = "epoch" ,               # Display setting
+                                  disable = disable_tqdm ):      # Display setting
+                 epoch_logic (
+                     net          = net ,                # Current network
+                     epoch        = epoch ,              # Current epoch
+                     train_losses = train_losses ,       # Training losses
+                     n_samples    = n_samples [epoch] ,  # Mini-batch size
+                     inputs       = self.inputs ,        # Input data
+                     outputs      = self.outputs ,       # Output data
+                     outputs_size = outputs_size ,       # Number of outputs
+                     batch_size   = self.batch_size ,    # Batch size
+                     optim        = self.optim_list [k], # Gradient computation
+                     crit         = self.crit            # Loss computation
+                 )
+                 update_lr (
+                     init_lr      = self.init_lr ,       # Initial learning rate
+                     final_lr     = self.final_lr ,      # Final learning rate
+                     optim        = self.optim_list [k], # Updates the learning rate
+                     outputs_size = outputs_size ,       # Number of outputs
+                     train_losses = train_losses ,       # Training losses
+                     train_lrs    = train_lrs ,          # Training learning rates
+                     epoch        = epoch                # Current epoch
+                 )
+             net = net.to (torch.device ("cpu"))         # Moves the network back to the cpu
+             update_trakers (
+                 net          = net ,          # Current network
+                 train_losses = train_losses,  # Updates the list of losses
+                 train_lrs    = train_lrs )    # Updates the list of learning rates
+             net.eval ()
+         self.inputs = self.inputs.to (torch.device ("cpu"))    # Moved back to the cpu
+         self.outputs = self.outputs.to (torch.device ("cpu"))  # Moved back to the cpu
+         torch.cuda.empty_cache ( )                             # Empties the cache
+     def __repr__ (self):
+         return self.name
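This hunk appears to be src/NeuralNetworks/Trainer/Trainer.py (entry 6). A hedged end-to-end sketch combining it with the MLP above; it assumes the Module base class provides the name, losses, learnings and train_forward members used by the training loop, and all numbers are illustrative:

    import torch
    from NeuralNetworks.MLP import MLP
    from NeuralNetworks.Trainer import Trainer

    inputs  = torch.rand(10_000, 3)                          # toy dataset
    outputs = torch.sin(inputs.sum(dim=1, keepdim=True))     # toy target, shape (10000, 1)

    net = MLP(input_size = 3, output_size = 1, hidden_layers = [64, 64])

    trainer = Trainer(net,                                   # models are passed positionally (*nets)
                      inputs     = inputs,
                      outputs    = outputs,
                      init_lr    = 1e-3,
                      final_lr   = 1e-5,
                      batch_size = 1024)
    trainer.train(num_epochs = 500)                          # history lands in net.losses / net.learnings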
@@ -0,0 +1,155 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from .._Dependances import torch, trange, scaler, autocast
+ from .._Dependances import device, dev, crits, optims
+ from .._shared import Module
+
+ def epoch_logic (
+         net          : Module ,                # Current network
+         epoch        : int ,                   # Current epoch
+         train_losses : torch.Tensor ,          # Training losses
+         n_samples    : int ,                   # Number of samples to take
+         inputs       : torch.Tensor ,          # Input data
+         outputs      : torch.Tensor ,          # Output data
+         outputs_size : int ,                   # Number of outputs
+         batch_size   : int ,                   # Batch size
+         optim        : torch.optim ,           # Optimizer used
+         crit         : torch.nn.modules.loss): # Loss criterion used
+     """
+     Runs one training epoch over mini-batches.
+     """
+
+     perm = torch.randperm (n_samples, device=device, requires_grad=False)
+
+     for i in range (0, n_samples, batch_size):
+         idx = perm [i : i + batch_size]
+         optim.zero_grad (set_to_none = True)
+
+         with autocast (dev):
+             all_loss = crit( net.train_forward(inputs[idx]), outputs[idx] ).mean(dim=0)
+
+         scaler.scale ( all_loss.mean() ).backward ()
+         scaler.step (optim)
+         scaler.update ( )
+
+         train_losses[epoch].add_(all_loss.detach())
+
+ def generate_learning_rate (Nb_iter : int ):
+     """
+     Generates a smooth learning-rate curve.
+     """
+
+     infl = int (0.1 * Nb_iter)
+
+     lr_curve = torch.empty(Nb_iter, device=device)
+
+     t = torch.linspace(0.0, 1.0, infl, device=device, requires_grad=False)
+     t4 = t*t; t3 = t4*t; t4.mul_(t4) ; t5 = t4*t
+     lr_curve[:infl] = 1 - 0.5 * (6*t5 - 15*t4 + 10*t3)
+
+     t = torch.linspace(0.0, 1.0, Nb_iter - infl, device=device, requires_grad=False)
+     t4 = t*t; t3 = t4*t; t4.mul_(t4) ; t5 = t4*t
+     lr_curve[infl:] = 0.5 * (1 - 6*t5 - 15*t4 + 10*t3)
+
+     return lr_curve
+
+ def update_lr (
+         init_lr      : float ,       # Initial learning rate
+         final_lr     : float ,       # Final learning rate
+         optim        : torch.optim , # Optimizer used
+         outputs_size : int ,         # Number of outputs
+         train_losses : torch.Tensor, # Latest losses
+         train_lrs    : torch.Tensor, # Learning rates
+         epoch        : int ):        # Current epoch
+     """
+     Computes an adaptive learning rate based on the recent losses.
+     """
+
+     if epoch >= 1:
+         x = train_losses[max(0, epoch-10):epoch].min(dim=0).values.max()
+     else:
+         x = train_losses[:1].min(dim=0).values.max()
+
+     y, u9 = x.clone(), x.clone()
+
+     y.mul_(y); y.mul_(y)
+     u9.mul_(-2); u9.add_(1); u9.addcmul_(x, x, value=1); u9.mul_(u9); u9.mul_(u9)
+
+     y.sub_(u9); y.add_(1.0); y.mul_(0.5)
+
+     train_lrs[epoch].clamp_min_(y)
+     train_lrs[epoch].mul_(init_lr - final_lr).add_(final_lr)
+
+     for param_group in optim.param_groups:
+         param_group ['lr'] = train_lrs[epoch].item()
+
+ def update_trakers (
+         net          : Module ,       # Current network
+         train_losses : torch.Tensor , # Losses
+         train_lrs    : torch.Tensor): # Learning rates
+     """
+     Updates the loss history and the learning rate of the model.
+     """
+
+     net.losses += train_losses.cpu().tolist()
+     net.learnings += train_lrs.cpu().tolist()
+
+
+ def init_train (
+         inputs           : torch.Tensor, # Input data
+         outputs          : torch.Tensor, # Output data
+         init_train_size  : float ,       # Initial proportion of data
+         final_train_size : float ,       # Final proportion of data
+         num_epochs       : int ,         # Number of epochs
+         benchmark        : bool ):       # Enables benchmark mode
+     """
+     Prepares the data and the training environment.
+     """
+
+     torch.backends.cudnn.benchmark = benchmark
+     torch.autograd.set_detect_anomaly (benchmark)
+     torch.autograd.profiler.profile (benchmark)
+     torch.use_deterministic_algorithms (benchmark)
+
+
+     n_samples = torch.linspace(
+         inputs.size(0) * init_train_size,
+         inputs.size(0) * final_train_size,
+         num_epochs, device = device, requires_grad=False
+     ).ceil().int()
+
+     inputs = inputs.to (device)
+     outputs = outputs.to (device)
+
+     train_lrs = generate_learning_rate (num_epochs)
+     train_losses = torch.zeros(
+         (num_epochs, outputs.size(1)), device=device, requires_grad=False
+     )
+
+     torch.cuda.empty_cache ()
+     return inputs, outputs, train_losses, train_lrs, n_samples
+
+ def init_Trainer (
+         nets       : list , # Models to train
+         crit       : str ,  # Loss function
+         optim      : str ,  # Optimizer used
+         init_lr    : float, # Initial learning rate
+         batch_size : int ): # Batch size
+     """
+     Initializes the loss criterion and the optimizers.
+     """
+     name = f"| optim : {optim}\n"
+     name += f"| crit : {crit}\n"
+     name += f"| init_lr : {init_lr}\n"
+     name += f"| batch_size : {batch_size}"
+
+     optim_list = []
+     for net in nets:
+         param = [{"params" : net.parameters (), "lr" : init_lr}]
+         optim_list.append( optims.get (optim) (param))
+     return crits.get (crit), optim_list, name
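This hunk appears to be src/NeuralNetworks/Trainer/_Trainer_tools.py (entry 7). Note how the schedule is split across two functions: generate_learning_rate produces a normalized curve, and update_lr rescales the per-epoch value into the [final_lr, init_lr] range with mul_(init_lr - final_lr).add_(final_lr). A tiny worked example of that rescaling (the curve value is illustrative):

    init_lr, final_lr = 1e-3, 1e-5
    curve_value = 0.75                                       # normalized schedule value for some epoch
    lr = curve_value * (init_lr - final_lr) + final_lr       # what mul_(...).add_(...) computes
    print(lr)                                                # 0.0007525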
@@ -0,0 +1,8 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from .Trainer import Trainer
@@ -0,0 +1,114 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from .._Dependances import *
+ from .._shared import Module
+
+ class VAE(Module):
+     def __init__(self ,
+                  imsize          : int ,
+                  latentsize      : int ,
+                  labelsize       : int ,
+                  channels        : list = [16, 32, 16, 8],
+                  linear_channels : list = [100] ,
+                  name            : str  = "encoder" ,
+                  norm            : str  = "Relu" ,
+                  norm_cc         : str  = "Relu" ):
+
+         super().__init__(name ,
+                          imsize          = imsize ,
+                          latentsize      = latentsize ,
+                          labelsize       = labelsize ,
+                          channels        = channels ,
+                          linear_channels = linear_channels,
+                          name            = name ,
+                          norm            = norm ,
+                          norm_cc         = norm_cc )
+
+         self.imsize = imsize
+         self.latentsize = latentsize
+         self.labelsize = labelsize
+
+         # Start latent conv channels
+         if norm is None:
+             self.norm = nn.Identity()
+         else:
+             self.norm = norms.get(norm)
+
+         if norm_cc is None:
+             self.norm_cc = nn.Identity()
+         else:
+             self.norm_cc = norms.get(norm_cc)
+
+         # ----- Encoder -----
+         Elayers = []
+         in_ch = 1  # grayscale input
+         for out_ch in channels:
+             Elayers.append(nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1))
+             Elayers.append(self.norm_cc)
+             Elayers.append(nn.MaxPool2d(kernel_size=3, stride=3, padding=1))
+             in_ch = out_ch
+
+         # Compute final flattened size dynamically
+         with torch.no_grad():
+             dummy = torch.zeros(1, 1, imsize, imsize)
+             for layer in Elayers:
+                 dummy = layer(dummy)
+             flat_dim = dummy.numel()
+             del dummy
+
+         Elayers.append(nn.Flatten())
+         Elayers.append(nn.Linear(flat_dim, linear_channels[0]))  #(cl_nbr+1) * int(imsize/2**cl_nbr)**2, latentsize * 30))
+         Elayers.append(self.norm)
+         Elayers.append(nn.Linear(linear_channels[0], latentsize))
+         Elayers.append(self.norm)
+         self.Emodel = nn.Sequential(*Elayers)
+
+         # ----- Decoder -----
+         Dlayers = []
+         Dlayers.append(nn.Linear(latentsize, int((labelsize+latentsize)/2)))
+         Dlayers.append(self.norm)
+         Dlayers.append(nn.Linear(int((labelsize+latentsize)/2), int((labelsize+latentsize))))
+         Dlayers.append(self.norm)
+         Dlayers.append(nn.Linear(int((labelsize+latentsize)), int((labelsize+latentsize))))
+         Dlayers.append(self.norm)
+         Dlayers.append(nn.Linear(int((labelsize+latentsize)), labelsize))
+         Dlayers.append(self.norm)
+
+         self.Dmodel = nn.Sequential(*Dlayers)
+
+     def encode(self, inputs):
+         image = np.array(inputs)
+         inputs = tensorise(inputs).to(device)
+
+         if image.ndim == 4:
+             x = inputs
+         elif inputs.ndim == 3:  # [H, W, C]? Or [C, H, W]?
+             x = inputs.unsqueeze(0)  # → [1, C, H, W]
+         elif image.ndim == 2:  # [H, W]
+             x = inputs.unsqueeze(0).unsqueeze(0)  # → [1, 1, H, W]
+         inputs = inputs.to('cpu')
+
+         self.Emodel = self.Emodel.to(device)
+         output = self.Emodel(x).flatten()
+         self.Emodel = self.Emodel.to('cpu')
+         return output
+
+     def decode(self, vector):
+         vector = tensorise(vector).to(device)
+         x = vector.view(1, 1, 1, self.latentsize)  # batch=1, channels=1, h=1, w=latentsize
+         vector = tensorise(vector).to('cpu')
+         self.Dmodel = self.Dmodel.to(device)
+         output = self.Dmodel(x).cpu().detach().numpy()[0][0]
+         self.Dmodel = self.Dmodel.to('cpu')
+         return output
+
+     def _forward (self, x):
+         return self.Dmodel(x)
+
+     def train_forward (self, x):
+         return self.Dmodel(self.Emodel(x))
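This hunk appears to be src/NeuralNetworks/VAE/VAE.py (entry 9). A hypothetical usage sketch; tensorise and device come from _Dependances (entries 12-14) and are not shown in this section, so their exact behaviour (array-like to float tensor, CPU/GPU selection) is assumed:

    import numpy as np
    from NeuralNetworks.VAE import VAE

    vae = VAE(imsize = 28, latentsize = 8, labelsize = 10)   # sizes are illustrative

    image  = np.random.rand(28, 28).astype("float32")        # single grayscale image, [H, W]
    latent = vae.encode(image)                               # 1-D tensor of length latentsize
    scores = vae.decode(latent)                              # numpy output of the decoder head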
@@ -0,0 +1,6 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
@@ -0,0 +1,8 @@
+ # NeuralNetworksBeta - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2025 - 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from .VAE import VAE
@@ -1,5 +1,5 @@
  # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
- # Copyright (C) 2026 Alexandre Brun
+ # Copyright (C) 2025 - 2026 Alexandre Brun
  # This program is free software: you can redistribute it and/or modify
  # it under the terms of the GNU General Public License as published by
  # the Free Software Foundation, either version 3 of the License, or
@@ -7,19 +7,19 @@

  import matplotlib.pyplot as plt
  from matplotlib.gridspec import GridSpec
+ from matplotlib.ticker import FixedLocator

  plt.rcParams['figure.facecolor'] = (0,0,0,0)
  plt.rcParams['axes.facecolor'] = (0,0,0,0)
- grey_color = "#888888"
+ plt.rcParams['axes.grid'] = True

- # General text and axes style
- plt.rcParams['text.color'] = grey_color
- plt.rcParams['axes.labelcolor'] = grey_color
- plt.rcParams['xtick.color'] = grey_color
- plt.rcParams['ytick.color'] = grey_color
- plt.rcParams['axes.edgecolor'] = grey_color
- plt.rcParams['axes.titlecolor'] = grey_color
+ from .pytorch import *
+ from .tools import *

- # Enable the global grid
- plt.rcParams['axes.grid'] = True
- plt.rcParams['grid.color'] = grey_color
+ import numpy as np
+
+ from tqdm.auto import trange, tqdm
+
+
+ import onnx
+ from onnxsim import simplify