NeuralNetworks 0.2.0-py3-none-any.whl → 0.2.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,75 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from .matplot import *
+ from .pytorch import *
+
+ import numpy as np
+ from PIL import Image
+
+ import copy
+ import subprocess
+ import requests
+ from io import BytesIO
+ from tqdm import tqdm
+ import plotly.graph_objects as go
+ from IPython.display import display, clear_output
+
+ from scipy.interpolate import griddata
+ from sklearn.model_selection import train_test_split
+
+ import math
+ pi = math.pi
+ e = math.e
+
+ def norms():
+     """Print the names of the supported activation functions."""
+     print("""
+ "Relu"
+ "LeakyRelu"
+ "ELU"
+ "SELU"
+ "GELU"
+ "Mish"
+ "Sigmoid"
+ "Tanh"
+ "Hardtanh"
+ "Softplus"
+ "Softsign"
+ """)
+
+ def crits():
+     """Print the names of the supported loss functions."""
+     print("""
+ "MSE"
+ "L1"
+ "SmoothL1"
+ "Huber"
+ "CrossEntropy"
+ "KLDiv"
+ "PoissonNLL"
+ "MultiLabelSoftMargin"
+ """)
+
+ def optims():
+     """Print the names of the supported optimizers."""
+     print("""
+ "Adadelta"
+ "Adafactor"
+ "Adam"
+ "AdamW"
+ "Adamax"
+ "ASGD"
+ "NAdam"
+ "RAdam"
+ "RMSprop"
+ "Rprop"
+ "SGD"
+ """)
+
+ def rglen(seq):
+     """Shorthand for range(len(seq))."""
+     return range(len(seq))
+
+ def fPrintDoc(obj):
+     """Return a zero-argument function that prints obj.__doc__."""
+     return lambda: print(obj.__doc__)
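These helpers are aimed at interactive sessions: norms, crits and optims print the accepted configuration strings, while rglen and fPrintDoc are small conveniences. A minimal usage sketch, assuming the wheel installs under the import name NeuralNetworks (the exact re-exports are an assumption here):

    import NeuralNetworks as nnets   # assumed import name

    nnets.norms()    # lists the accepted activation names ("Relu", "GELU", ...)
    nnets.crits()    # lists the accepted loss names ("MSE", "Huber", ...)
    nnets.optims()   # lists the accepted optimizer names ("Adam", "SGD", ...)

    for i in nnets.rglen(["a", "b", "c"]):   # shorthand for range(len(...))
        print(i)                             # 0, 1, 2

    nnets.fPrintDoc(print)()   # builds a thunk that prints print.__doc__, then calls it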
@@ -0,0 +1,25 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ import matplotlib.pyplot as plt
+ from matplotlib.gridspec import GridSpec
+
+ # Transparent figure and axes backgrounds
+ plt.rcParams['figure.facecolor'] = (0, 0, 0, 0)
+ plt.rcParams['axes.facecolor'] = (0, 0, 0, 0)
+ grey_color = "#888888"
+
+ # General text and axis styling
+ plt.rcParams['text.color'] = grey_color
+ plt.rcParams['axes.labelcolor'] = grey_color
+ plt.rcParams['xtick.color'] = grey_color
+ plt.rcParams['ytick.color'] = grey_color
+ plt.rcParams['axes.edgecolor'] = grey_color
+ plt.rcParams['axes.titlecolor'] = grey_color
+
+ # Enable the grid globally
+ plt.rcParams['axes.grid'] = True
+ plt.rcParams['grid.color'] = grey_color
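Because these are global plt.rcParams mutations, simply importing the package restyles every figure created afterwards. A small sketch of the effect, with the import name assumed:

    import NeuralNetworks   # assumed import name; importing applies the rcParams above
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    plt.show()   # transparent background, grey text/axes/grid from the global settings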
@@ -0,0 +1,111 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ import os
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
+ import platform
+
+ import torch
+ import torch.optim as optim
+ import torch.nn as nn
+ import torch.quantization as tq
+ from torch.amp import autocast, GradScaler
+ from torch.utils.data import TensorDataset, DataLoader
+
+ from torchmetrics.image import PeakSignalNoiseRatio as PSNR
+ from torchvision.transforms import ToTensor, Resize, Compose
+
+ torch.cuda.empty_cache()
+
+ def get_best_device():
+     """Pick the best available accelerator for the current OS."""
+     os_name = platform.system().lower()
+
+     # =========== APPLE SILICON (macOS) ===========
+     if os_name == "darwin":
+         if torch.backends.mps.is_available():
+             return torch.device("mps")
+
+     # =========== WINDOWS ===========
+     if os_name == "windows":
+         # 1) CUDA
+         if torch.cuda.is_available():
+             return torch.device("cuda")
+
+     # =========== LINUX ===========
+     if os_name == "linux":
+         # 1) CUDA (Nvidia)
+         if torch.cuda.is_available():
+             return torch.device("cuda")
+         # 2) ROCm (AMD); HIP builds expose the GPU through the CUDA API
+         elif hasattr(torch.backends, "hip") and torch.backends.hip.is_available():
+             return torch.device("cuda")
+         # 3) Intel oneAPI / XPU
+         elif hasattr(torch, "xpu") and torch.xpu.is_available():
+             return torch.device("xpu")
+
+     # =========== Fallback: CPU (no accelerator or unknown OS) ===========
+     return torch.device("cpu")
+
+ device = get_best_device()
+
+ # --- CUDA optimisations ---
+ # Speed up convolutions and matmuls
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cudnn.enabled = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+
+ # Autograd settings
+ torch.autograd.set_detect_anomaly(False)
+ torch.autograd.profiler.profile(enabled=False)  # note: a no-op unless used as a context manager; profiling is off by default
+ torch.use_deterministic_algorithms(False)
+
+ torch._inductor.config.max_autotune = True  # max_autotune is a boolean flag
+
+ norm_list = {
+     "Relu": nn.ReLU(),
+     "LeakyRelu": nn.LeakyReLU(),
+     "ELU": nn.ELU(),
+     "SELU": nn.SELU(),
+     "GELU": nn.GELU(),
+     "Mish": nn.Mish(),
+     "Sigmoid": nn.Sigmoid(),
+     "Tanh": nn.Tanh(),
+     "Hardtanh": nn.Hardtanh(),
+     "Softplus": nn.Softplus(),
+     "Softsign": nn.Softsign()
+ }
+
+ crit_list = {
+     "MSE": nn.MSELoss(),
+     "L1": nn.L1Loss(),
+     "SmoothL1": nn.SmoothL1Loss(),
+     "Huber": nn.HuberLoss(),
+     "CrossEntropy": nn.CrossEntropyLoss(),
+     "KLDiv": nn.KLDivLoss(),
+     "PoissonNLL": nn.PoissonNLLLoss(),
+     "MultiLabelSoftMargin": nn.MultiLabelSoftMarginLoss()
+ }
+
+ def optim_list(params):
+     """Build every supported optimizer for `params`; callers pick one by name."""
+     return {
+         "Adadelta": optim.Adadelta(params),
+         "Adafactor": optim.Adafactor(params),
+         "Adam": optim.Adam(params),
+         "AdamW": optim.AdamW(params),
+         "Adamax": optim.Adamax(params),
+         "ASGD": optim.ASGD(params),
+         "NAdam": optim.NAdam(params),
+         "RAdam": optim.RAdam(params),
+         "RMSprop": optim.RMSprop(params),
+         "Rprop": optim.Rprop(params),
+         "SGD": optim.SGD(params)
+     }
+
+ def tensorise(obj):
+     """Convert obj to a float32 tensor on the CPU."""
+     return torch.as_tensor(obj, dtype=torch.float32, device='cpu')
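This module resolves a global device once at import time and exposes small factories around it. A hedged sketch of how these pieces are meant to be used, assuming the names are re-exported through the package's star imports:

    import torch.nn as nn
    from NeuralNetworks import device, tensorise, optim_list   # assumed re-exports

    print(device)   # cuda / mps / xpu / cpu, depending on the machine

    x = tensorise([[1.0, 2.0], [3.0, 4.0]])
    print(x.dtype, x.device)   # torch.float32 cpu

    # optim_list eagerly builds every supported optimizer for the given params;
    # the caller keeps one by name and discards the rest.
    model = nn.Linear(2, 1)
    params = [{"params": model.parameters(), "lr": 0.01}]
    opt = optim_list(params).get("AdamW")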
@@ -0,0 +1,89 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ..Dependances import torch, nn, np
+
+ class FourierEncoding(nn.Module):
+     """
+     Random Fourier encoding to enrich the input representation.
+
+     This layer projects the inputs into a frequency space using a learned
+     projection matrix, then applies sine and cosine functions in order to
+     capture high-frequency variations.
+
+     Parameters
+     ----------
+     nb_fourier : int
+         Number of Fourier components.
+     input_size : int
+         Dimension of the inputs.
+     sigma : float
+         Standard deviation used to initialise the projection matrix.
+     """
+     def __init__(self, nb_fourier, input_size, sigma):
+         super().__init__()
+         self.B = nn.Parameter(torch.randn(nb_fourier, input_size) * sigma)
+
+     def forward(self, x):
+         """
+         Apply the Fourier encoding to the inputs.
+
+         Parameters
+         ----------
+         x : torch.Tensor
+             Input tensor of shape `(N, input_size)`.
+
+         Returns
+         -------
+         torch.Tensor
+             Encoded tensor of shape `(N, 2 * nb_fourier)`, the concatenation
+             of the cosines and sines.
+         """
+         vp = 2 * np.pi * x @ self.B.T
+         return torch.cat((torch.cos(vp), torch.sin(vp)), dim=-1)
+
+ def encode(input_size, output_size, sigmas, fourier_input_size, nb_fourier):
+     """
+     Build the encoding modules (Fourier or identity) and the associated fusion layer.
+
+     If `sigmas` is `None`, no Fourier encoding is applied and the inputs are
+     passed straight through to the network.
+     Otherwise one Fourier encoding is created per sigma, and their outputs
+     are fused through a linear layer.
+
+     Parameters
+     ----------
+     input_size : int
+         Dimension of the inputs.
+     output_size : int
+         Output dimension of the network.
+     sigmas : list[float] or None
+         List of sigma parameters for the Fourier encodings.
+     fourier_input_size : int
+         Expected dimension after encoding (not used directly here, kept for
+         consistency with the overall architecture).
+     nb_fourier : int
+         Number of Fourier components per encoding.
+
+     Returns
+     -------
+     encodings : torch.nn.ModuleList (scripted)
+         List of encoding modules (Fourier or identity).
+     f : torch.nn.Module (scripted)
+         Fusion module for the encodings (identity or linear layer).
+     """
+     if sigmas is None:
+         encodings = nn.ModuleList(
+             [nn.Identity()]
+         )
+         f = nn.Identity()
+     else:
+         encodings = nn.ModuleList(
+             [FourierEncoding(nb_fourier, input_size, sigma) for sigma in sigmas]
+         )
+         f = nn.Linear(len(encodings) * output_size, output_size)
+     return torch.jit.script(encodings), torch.jit.script(f)
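Shape-wise, each FourierEncoding maps `(N, input_size)` to `(N, 2 * nb_fourier)`. A minimal sketch; the import path is an assumption inferred from the relative imports above:

    import torch
    from NeuralNetworks.MLP.FourierFeatures import FourierEncoding   # assumed module path

    enc = FourierEncoding(nb_fourier=8, input_size=2, sigma=10.0)
    z = enc(torch.rand(5, 2))     # batch of 5 two-dimensional points
    print(z.shape)                # torch.Size([5, 16]): cosines and sines concatenated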
@@ -0,0 +1,31 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ..Dependances import torch, nn
+
+ def create_layers(
+         input_size,
+         output_size,
+         hidden_layers,
+         sigmas,
+         fourier_input_size,
+         nb_fourier,
+         norm):
+
+     # First layer takes the raw inputs, or the Fourier features (2 * nb_fourier)
+     layer_list = [
+         nn.Linear(input_size if sigmas is None else 2 * nb_fourier, hidden_layers[0]),
+         norm
+     ]
+
+     # Hidden layers, reusing the same (stateless) activation module
+     for k in range(len(hidden_layers) - 1):
+         layer_list.extend([
+             nn.Linear(hidden_layers[k], hidden_layers[k + 1]),
+             norm
+         ])
+     layer_list.append(nn.Linear(hidden_layers[-1], output_size))
+
+     return torch.jit.script(nn.Sequential(*layer_list))
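create_layers wires the first Linear to either the raw input width (when sigmas is None) or to 2 * nb_fourier, the output width of FourierEncoding. A hedged sketch, with the module path assumed:

    import torch.nn as nn
    from NeuralNetworks.MLP.Layers import create_layers   # assumed module path

    net = create_layers(
        input_size=2,
        output_size=1,
        hidden_layers=[32, 32],
        sigmas=None,          # no Fourier front end, so the first layer takes 2 features
        fourier_input_size=2,
        nb_fourier=8,
        norm=nn.ReLU(),
    )
    print(net)   # scripted Sequential: Linear(2->32), ReLU, Linear(32->32), ReLU, Linear(32->1)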
@@ -0,0 +1,99 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ..Dependances import norm_list, nn
+ from .FourierFeatures import encode
+ from .Layers import create_layers
+ from .inference import infer
+
+ class MLP(nn.Module):
+     """
+     Multi-Layer Perceptron with optional Fourier encoding.
+
+     This class implements a configurable MLP that can place one or more
+     Fourier encodings in front of the network to improve its ability to
+     represent high-frequency signals.
+
+     Parameters
+     ----------
+     input_size : int, optional
+         Dimension of the network inputs. Default: 1.
+     output_size : int, optional
+         Dimension of the network outputs. Default: 1.
+     hidden_layers : list[int], optional
+         Sizes of the hidden layers. Default: [1].
+     sigmas : list[float] or None, optional
+         Sigma parameters for the Fourier encodings. If `None`, no Fourier
+         encoding is used.
+     fourier_input_size : int, optional
+         WIP
+     nb_fourier : int, optional
+         Number of Fourier components per encoding. Default: 8.
+     norm : str, optional
+         Name of the activation function to use. Default: "Relu".
+     name : str, optional
+         Name of the network. Default: "Net".
+     """
+
+     def __init__(
+             self,
+             input_size=1,
+             output_size=1,
+             hidden_layers=[1],
+             sigmas=None,
+             fourier_input_size=2,
+             nb_fourier=8,
+             norm="Relu",
+             name="Net"):
+         super().__init__()
+
+         # --- Activation ---
+         self.norm = norm_list.get(norm)
+         if self.norm is None:
+             print(f"Warning: '{norm}' not recognized, falling back to 'Relu'")
+             self.norm = norm_list.get("Relu")
+
+         # --- Attributes ---
+         self.losses = []
+         self.learnings = []
+         self.name = name
+
+         # --- Fourier encoding or passthrough ---
+         self.encodings, self.f = encode(
+             input_size,
+             output_size,
+             sigmas,
+             fourier_input_size,
+             nb_fourier
+         )
+
+         # --- Network construction ---
+         self.model = create_layers(
+             input_size,
+             output_size,
+             hidden_layers,
+             sigmas,
+             fourier_input_size,
+             nb_fourier,
+             self.norm
+         )
+
+     def forward(self, x):
+         """
+         Run a forward pass of the network.
+
+         Parameters
+         ----------
+         x : torch.Tensor
+             Network inputs of shape `(N, input_size)`.
+
+         Returns
+         -------
+         numpy.ndarray
+             Flattened MLP output, as produced by `infer`.
+         """
+         return infer(self, x)
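End to end, an MLP is configured with the string names listed in Dependances, and the Fourier front end is switched on by passing sigmas. A usage sketch; the package-level re-export of MLP is an assumption:

    import torch
    from NeuralNetworks import MLP   # assumed re-export

    net = MLP(
        input_size=1,
        output_size=1,
        hidden_layers=[64, 64],
        sigmas=[1.0, 10.0],   # two Fourier encodings, fused by a Linear layer
        nb_fourier=16,
        norm="GELU",
        name="demo",
    )

    x = torch.linspace(0, 1, 100).unsqueeze(1)   # shape (100, 1)
    y = net(x)   # forward() delegates to infer() and returns a flattened NumPy array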
@@ -0,0 +1,26 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ..Dependances import torch, np, device
+
+ def infer(net, x):
+     """Run the network on x (moved to the best device) and return a flat NumPy array."""
+     with torch.no_grad():
+         # Promote a single sample to a batch of one
+         x = x.unsqueeze(0) if x.dim() == 1 else x
+
+         net = net.to(device)
+         x = x.to(device)
+         results_list = [net.model(encoding(x)) for encoding in net.encodings]
+         x = x.to('cpu')
+
+         # Fuse the per-encoding outputs and flatten to a NumPy array
+         output = np.array(
+             net.f(
+                 torch.cat(results_list, dim=1)
+             ).cpu().numpy().flatten()
+         )
+         net = net.to('cpu')
+
+     return output
@@ -0,0 +1,51 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ..Dependances import crit_list, optim_list
+ from .train import train_f
+ from .sample_data import sample_data
+
+ class Trainer:
+     """Train one or more networks on the same data, with a shared loss and one optimizer per net."""
+
+     def __init__(self,
+                  *nets,
+                  inputs,
+                  outputs,
+                  test_size=None,
+                  optim='Adam',
+                  init_lr=0.01,
+                  crit='MSE',
+                  batch_size):  # required keyword: number of samples per mini-batch
+
+         self.batch_size = batch_size
+         self.nets = nets
+         self.init_lr = init_lr
+
+         self.X_train, self.X_test, self.y_train, self.y_test = sample_data(
+             inputs,
+             outputs,
+             test_size
+         )
+
+         # --- Loss function ---
+         self.crit = crit_list.get(crit)
+         if self.crit is None:
+             print(f"Warning: '{crit}' not recognized, falling back to 'MSE'")
+             self.crit = crit_list.get("MSE")
+
+         # --- Optimizer selection ---
+         self.optims = []
+         for net in nets:
+             params = [{"params": net.parameters(), "lr": self.init_lr}]
+             new_optim = optim_list(params).get(optim)
+             if new_optim is None:
+                 print(f"Warning: '{optim}' not recognized, falling back to 'Adam'")
+                 new_optim = optim_list(params).get("Adam")
+             self.optims.append(new_optim)
+
+     def train(self, num_epochs=1500, activate_tqdm=True):
+         train_f(self, num_epochs, activate_tqdm)
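Trainer ties the pieces together: it splits the data, resolves the loss and one optimizer per network, and train drives the loop in train_f. A hedged sketch, with the re-exports assumed:

    import torch
    from NeuralNetworks import MLP, Trainer, tensorise   # assumed re-exports

    net = MLP(input_size=1, output_size=1, hidden_layers=[64], sigmas=[5.0])

    x = tensorise(torch.rand(256, 1))
    y = torch.sin(6.28 * x)

    trainer = Trainer(
        net,               # any number of nets may be passed positionally
        inputs=x,
        outputs=y,
        test_size=0.2,     # None reuses the full set as both train and test
        optim="AdamW",
        init_lr=0.01,
        crit="MSE",
        batch_size=64,
    )
    trainer.train(num_epochs=500)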
@@ -0,0 +1,79 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ..Dependances import np
+
+ def generate_learning_rate(
+         Nb_iter,
+         X0,
+         mode="smoother",
+         first=0.4,
+         second=1,
+         Xi=5e-4,
+         Xf=1e-6):
+
+     infl = int(first * Nb_iter)   # inflection point of the schedule
+     Plat = int(second * Nb_iter)  # start of the final plateau
+
+     def smoothstep(x0, xa, n, m):
+         values = []
+         if m == "smooth":
+             for i in range(n):
+                 t = i / (n - 1)                            # t in [0, 1]
+                 s = t * t * (3 - 2 * t)                    # smoothstep
+                 x = x0 + (xa - x0) * s
+                 values.append(x)
+         elif m == "smoother":
+             for i in range(n):
+                 t = i / (n - 1)                            # t in [0, 1]
+                 s = t * t * t * (t * (6 * t - 15) + 10)    # smootherstep
+                 x = x0 + (xa - x0) * s
+                 values.append(x)
+         else:
+             raise ValueError("mode must be 'smooth' or 'smoother'")
+         return values
+
+     cuv1 = smoothstep(X0, Xi, infl, mode)
+     cuv2 = smoothstep(Xi, Xf, Plat - infl, mode)
+     cuv3 = [Xf for _ in range(Plat, Nb_iter)]
+
+     return np.array(cuv1 + cuv2 + cuv3)
+
+ def update_lr(losses, lrs, epoch, lr):
+
+     # Extrapolate the recent loss trend one step ahead
+     loss = losses[-1] + (losses[-1] - losses[0]) / len(losses)
+
+     n = 9   # degree of the Bézier curve below
+     # Control points (repeated end point = zero final derivative)
+     P = np.array([
+         0.0,
+         0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
+         1.0, 1.0
+     ])
+
+     # Binomial coefficients (precomputed once)
+     C = np.array([1, 9, 36, 84, 126, 126, 84, 36, 9, 1], dtype=float)
+
+     x = np.clip(loss, 0.0, 1.0)
+     t = np.sqrt(x)
+
+     u = 1.0 - t
+
+     # Degree-9 Bernstein expansion of the Bézier curve
+     y = (
+         C[0] * u**9 * P[0] +
+         C[1] * u**8 * t * P[1] +
+         C[2] * u**7 * t**2 * P[2] +
+         C[3] * u**6 * t**3 * P[3] +
+         C[4] * u**5 * t**4 * P[4] +
+         C[5] * u**4 * t**5 * P[5] +
+         C[6] * u**3 * t**6 * P[6] +
+         C[7] * u**2 * t**7 * P[7] +
+         C[8] * u * t**8 * P[8] +
+         C[9] * t**9 * P[9]
+     )
+     return np.clip(max(0.001 * y, lrs[epoch]), 0.0, lr)
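With the defaults (first=0.4, second=1), generate_learning_rate eases from X0 down to Xi over the first 40% of the epochs, then from Xi down to Xf over the rest; update_lr later lifts that floor when recent losses stay high. A small sketch of the schedule, with the module path assumed:

    from NeuralNetworks.MLP.dynamic_learning_rate import generate_learning_rate   # assumed path

    lrs = generate_learning_rate(Nb_iter=1000, X0=0.01)
    print(len(lrs))                    # 1000, one learning rate per epoch
    print(lrs[0], lrs[400], lrs[-1])   # 0.01 at the start, ~5e-4 at the inflection, ~1e-6 at the end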
@@ -0,0 +1,19 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ..Dependances import train_test_split
+
+ def sample_data(inputs, outputs, test_size):
+     # Without a test_size, the full set is reused for both train and test
+     if test_size is None:
+         return inputs, inputs, outputs, outputs
+     else:
+         return train_test_split(
+             inputs,
+             outputs,
+             test_size=test_size,
+             random_state=42
+         )
@@ -0,0 +1,75 @@
+ # NeuralNetworks - Multi-Layer Perceptrons with Fourier encoding
+ # Copyright (C) 2026 Alexandre Brun
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+
+ from ..Dependances import torch, GradScaler, device, tqdm, autocast
+
+ from .dynamic_learning_rate import generate_learning_rate, update_lr
+
+ def train_f(trainer, num_epochs=1, activate_tqdm=True):
+     dev = str(device)
+     scaler = GradScaler(dev)
+
+     lrs = generate_learning_rate(num_epochs, trainer.init_lr)
+
+     trainer.X_train = trainer.X_train.to(device)
+     trainer.y_train = trainer.y_train.to(device)
+     n_samples = trainer.X_train.size(0)
+
+     torch.cuda.empty_cache()
+     for k, net in enumerate(trainer.nets):
+         net = net.to(device)
+         net.learnings.append(trainer.init_lr)
+
+         pbar = tqdm(
+             range(num_epochs),
+             desc="train epoch",
+             disable=not activate_tqdm
+         )
+
+         for epoch in pbar:
+             # Generate a random ordering of the sample indices
+             perm = torch.randperm(n_samples, device=device)
+             epoch_loss = 0.0
+
+             # --- Iterate over the mini-batches ---
+             for i in range(0, n_samples, trainer.batch_size):
+                 idx = perm[i : i + trainer.batch_size]
+
+                 # Inner function computing the loss and gradients
+                 def closure():
+                     trainer.optims[k].zero_grad(set_to_none=True)
+                     with autocast(dev):
+                         loss = trainer.crit(
+                             net.f(
+                                 torch.cat(
+                                     [net.model(encoding(trainer.X_train[idx])) for encoding in net.encodings],
+                                     dim=1
+                                 )
+                             ),
+                             trainer.y_train[idx]
+                         )
+                     scaler.scale(loss).backward()
+                     return loss
+
+                 epoch_loss += closure().detach()   # detach so the running total does not extend the autograd graph
+                 scaler.step(trainer.optims[k])
+                 scaler.update()
+
+             # --- Store the epoch loss and adapt the learning rate ---
+             #trainer.frequencies.append(net.encodings[0].B.detach().cpu().clone())
+             net.losses.append(epoch_loss.item())
+             net.learnings.append(update_lr(net.losses[-20:], lrs, epoch, net.learnings[-1]))
+             for param_group in trainer.optims[k].param_groups:
+                 param_group['lr'] = net.learnings[-1]
+
+             pbar.set_postfix(loss=f"{epoch_loss:.5f}", lr=f"{net.learnings[-1]:.5f}")
+
+         net = net.to('cpu')
+         net.learnings.pop(-1)
+     trainer.X_train = trainer.X_train.to('cpu')
+     trainer.y_train = trainer.y_train.to('cpu')
+     torch.cuda.empty_cache()
+ torch.cuda.empty_cache ()