NeuralNetworks 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- NeuralNetworks/Dependances/__init__.py +75 -0
- NeuralNetworks/Dependances/matplot.py +25 -0
- NeuralNetworks/Dependances/pytorch.py +111 -0
- NeuralNetworks/MLP/FourierFeatures.py +89 -0
- NeuralNetworks/MLP/Layers.py +31 -0
- NeuralNetworks/MLP/__init__.py +99 -0
- NeuralNetworks/MLP/inference.py +26 -0
- NeuralNetworks/Trainer/__init__.py +51 -0
- NeuralNetworks/Trainer/dynamic_learning_rate.py +79 -0
- NeuralNetworks/Trainer/sample_data.py +19 -0
- NeuralNetworks/Trainer/train.py +75 -0
- NeuralNetworks/UI/Learnings.py +45 -0
- NeuralNetworks/UI/Losses.py +45 -0
- NeuralNetworks/UI/__init__.py +9 -0
- NeuralNetworks/__init__.py +7 -111
- {neuralnetworks-0.2.0.dist-info → neuralnetworks-0.2.2.dist-info}/METADATA +70 -66
- neuralnetworks-0.2.2.dist-info/RECORD +24 -0
- NeuralNetworks/Dependances.py +0 -319
- NeuralNetworks/Latent.py +0 -51
- NeuralNetworks/MLP.py +0 -601
- neuralnetworks-0.2.0.dist-info/RECORD +0 -13
- {neuralnetworks-0.2.0.dist-info → neuralnetworks-0.2.2.dist-info}/WHEEL +0 -0
- {neuralnetworks-0.2.0.dist-info → neuralnetworks-0.2.2.dist-info}/licenses/LICENSE +0 -0
- {neuralnetworks-0.2.0.dist-info → neuralnetworks-0.2.2.dist-info}/top_level.txt +0 -0
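Version 0.2.2 replaces the monolithic 0.2.0 modules (`Dependances.py`, `Latent.py`, `MLP.py`) with `Dependances/`, `MLP/`, `Trainer/`, and `UI/` subpackages. Only the module paths can be read from this listing; the symbols each `__init__.py` exports are not part of the diff, so the sketch below is orientation only.

# Package paths added in 0.2.2 according to the listing above (their exports are not visible in this diff).
import NeuralNetworks.Dependances   # split into __init__.py, matplot.py, pytorch.py
import NeuralNetworks.MLP           # FourierFeatures.py, Layers.py, inference.py
import NeuralNetworks.Trainer       # train.py, dynamic_learning_rate.py, sample_data.py
import NeuralNetworks.UI            # Learnings.py, Losses.py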
NeuralNetworks/Dependances.py
DELETED
@@ -1,319 +0,0 @@
# NeuralNetworksBeta - Multi-Layer Perceptrons with Fourier encoding
# Copyright (C) 2025 Alexandre Brun
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import torch.quantization as tq
from torch.amp import autocast, GradScaler
from torch.utils.data import TensorDataset, DataLoader

import visualtorch

from torchmetrics.image import PeakSignalNoiseRatio as PSNR
from torchvision.transforms import ToTensor, Resize, Compose

from PIL import Image

import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

import platform
import copy
import math
import subprocess
import requests
from io import BytesIO
import rff
from tqdm import tqdm

from IPython.display import display, clear_output

torch.cuda.empty_cache()

# --- Global device ---
# Uses the GPU if available, otherwise the CPU

def get_best_device():
    """
    Automatically determines the best available PyTorch backend for the OS
    and the GPUs present on the machine.

    The priority depends on the system:

    macOS (Apple Silicon)
    ---------------------
    - Uses Metal/MPS if available.
    - Otherwise CPU.

    Windows
    -------
    - CUDA if an Nvidia CUDA GPU is detected.
    - Otherwise DirectML (AMD / Intel / Nvidia without CUDA).
    - Otherwise CPU.

    Linux
    -----
    - CUDA (Nvidia)
    - ROCm (AMD)
    - oneAPI / XPU (Intel GPU)
    - Otherwise CPU.

    Returns
    -------
    torch.device
        The optimal device for running PyTorch on the detected hardware.

    Notes
    -----
    - On ROCm, PyTorch exposes the "cuda" alias, which is expected.
    - DirectML provides a universal GPU fallback on Windows.
    - This function runs no performance test; it relies only on the backends
      available in the PyTorch installation.
    """

    os_name = platform.system().lower()

    # =========== APPLE SILICON (macOS) ===========
    if os_name == "darwin":
        if torch.backends.mps.is_available():
            return torch.device("mps")

    # =========== WINDOWS ===========
    if os_name == "windows":
        # 1) CUDA
        if torch.cuda.is_available():
            return torch.device("cuda")

    # =========== LINUX ===========
    if os_name == "linux":
        # 1) CUDA (Nvidia)
        if torch.cuda.is_available():
            return torch.device("cuda")
        # 2) ROCm (AMD)
        elif hasattr(torch.backends, "hip") and torch.backends.hip.is_available():
            return torch.device("cuda")

        # 3) Intel oneAPI / XPU
        elif hasattr(torch, "xpu") and torch.xpu.is_available():
            return torch.device("xpu")

    # =========== Unknown OS ===========
    return torch.device("cpu")

device = get_best_device()

def has_gcc():
    """Return True if GCC is installed and callable."""
    try:
        r = subprocess.run(
            ["gcc", "--version"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL
        )
        return r.returncode == 0
    except FileNotFoundError:
        return False

# --- Global plotting parameters ---
# Transparent background and uniform grey colour
plt.rcParams['figure.facecolor'] = (0,0,0,0)
plt.rcParams['axes.facecolor'] = (0,0,0,0)
grey_color = "#888888"

# General text and axis style
plt.rcParams['text.color'] = grey_color
plt.rcParams['axes.labelcolor'] = grey_color
plt.rcParams['xtick.color'] = grey_color
plt.rcParams['ytick.color'] = grey_color
plt.rcParams['axes.edgecolor'] = grey_color
plt.rcParams['axes.titlecolor'] = grey_color

# Enable the grid globally
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.color'] = grey_color

# --- CUDA optimisations ---
# Speed up convolutions and matmul
torch.backends.cudnn.benchmark = True  # tunes kernels to the tensor sizes seen
torch.backends.cudnn.enabled = True
torch.backends.cuda.matmul.allow_tf32 = True  # allow TF32 (faster on Ampere+)
torch.backends.cudnn.allow_tf32 = True

# Autograd settings
torch.autograd.set_detect_anomaly(False)  # no heavyweight anomaly analysis
torch.autograd.profiler.profile(enabled=False)
torch.use_deterministic_algorithms(False)  # favour performance over strict reproducibility

torch._inductor.config.max_autotune = "max"  # max config for Torch-Inductor

# Constants
pi = math.pi
e = math.e

# --- Available normalisations/activations ---
norm_list = {
    "Relu": nn.ReLU(),
    "LeakyRelu": nn.LeakyReLU(),
    "ELU": nn.ELU(),
    "SELU": nn.SELU(),
    "GELU": nn.GELU(),
    "Sigmoid": nn.Sigmoid(),
    "Tanh": nn.Tanh(),
    "Hardtanh": nn.Hardtanh(),
    "Softplus": nn.Softplus(),
    "Softsign": nn.Softsign()
}
norms = lambda: print("""
    "Relu"
    "LeakyRelu"
    "ELU"
    "SELU"
    "GELU"
    "Sigmoid"
    "Tanh"
    "Hardtanh"
    "Softplus"
    "Softsign"
    """
)

# --- Available loss functions ---
crit_list = {
    "MSE": nn.MSELoss(),
    "L1": nn.L1Loss(),
    "SmoothL1": nn.SmoothL1Loss(),
    "Huber": nn.HuberLoss(),
    "CrossEntropy": nn.CrossEntropyLoss(),
    "KLDiv": nn.KLDivLoss(),
    "PoissonNLL": nn.PoissonNLLLoss(),
    "MultiLabelSoftMargin": nn.MultiLabelSoftMarginLoss(),
}
crits = lambda: print("""
    "MSE"
    "L1"
    "SmoothL1"
    "Huber"
    "CrossEntropy"
    "KLDiv"
    "PoissonNLL"
    "MultiLabelSoftMargin"
    """
)

# --- Dictionary of optimisers ---
def optim_list(self, learning_rate):
    """
    Returns a dictionary of PyTorch optimisers for the given MLP.

    Parameters
    ----------
    self : object
        Object holding the `self.model` to optimise.
    learning_rate : float
        Learning rate to apply to the optimisers.

    Returns
    -------
    dict
        Dictionary {optimiser_name: optimiser_instance}.
    """
    return {
        "Adadelta": optim.Adadelta(self.model.parameters(), lr=learning_rate),
        "Adafactor": optim.Adafactor(self.model.parameters(), lr=learning_rate),
        "Adam": optim.Adam(self.model.parameters(), lr=learning_rate),
        "AdamW": optim.AdamW(self.model.parameters(), lr=learning_rate),
        "Adamax": optim.Adamax(self.model.parameters(), lr=learning_rate),
        "ASGD": optim.ASGD(self.model.parameters(), lr=learning_rate),
        "NAdam": optim.NAdam(self.model.parameters(), lr=learning_rate),
        "RAdam": optim.RAdam(self.model.parameters(), lr=learning_rate),
        "RMSprop": optim.RMSprop(self.model.parameters(), lr=learning_rate),
        "Rprop": optim.Rprop(self.model.parameters(), lr=learning_rate),
        "SGD": optim.SGD(self.model.parameters(), lr=learning_rate)
    }

optims = lambda: print("""
    "Adadelta"
    "Adafactor"
    "Adam"
    "AdamW"
    "Adamax"
    "ASGD"
    "NAdam"
    "RAdam"
    "RMSprop"
    "Rprop"
    "SGD"
    """
)

# --- Utility functions ---
def rglen(list):
    """
    Returns a range over the indices of a list.

    Parameters
    ----------
    list : list-like
        Object whose indices are wanted.

    Returns
    -------
    range
        Python range from 0 to len(list)-1.
    """
    return range(len(list))

def tensorise(obj):
    """
    Converts an object to a float32 PyTorch tensor and moves it to the global device.

    Parameters
    ----------
    obj : array-like, list, np.ndarray, torch.Tensor
        Object to convert to a tensor.

    Returns
    -------
    torch.Tensor
        float32 tensor on the global device (CPU or GPU).

    Notes
    -----
    - Harmonises dtypes for the MLP and other PyTorch processing.
    - Ensures the data is compatible with GPU/CPU operations.
    """
    return torch.as_tensor(obj, dtype=torch.float32, device=device)

def fPrintDoc(obj):
    """
    Creates an anonymous function that prints an object's docstring.

    Parameters
    ----------
    obj : object
        Any Python object with a `__doc__` attribute.

    Returns
    -------
    function
        A zero-argument function. When called, it prints the docstring
        of the object that was passed in.

    Example
    -------
    >>> def ma_fonction():
    ...     '''Ceci est le docstring.'''
    ...     pass
    >>> print_doc = fPrintDoc(ma_fonction)
    >>> print_doc()
    Ceci est le docstring.
    """
    return lambda: print(obj.__doc__)

torch.cuda.empty_cache()
NeuralNetworks/Latent.py
DELETED
@@ -1,51 +0,0 @@
# NeuralNetworksBeta - Multi-Layer Perceptrons with Fourier encoding
# Copyright (C) 2025 Alexandre Brun
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

from .Dependances import *

class Latent(nn.Module):
    def __init__(self, insize, outsize):
        super().__init__()

        self.insize = insize
        self.outsize = outsize

        # Start latent conv channels
        channels = 128

        # ----- Encoder -----
        layers = []
        for k in range(5):
            layers.append(nn.Conv2d(channels, channels // 2, kernel_size=3, padding=1))
            channels = channels // 2
            layers.append(nn.BatchNorm2d(channels))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

        layers.append(nn.Flatten())
        layers.append(nn.Linear(channels * 1 * 1, outsize))  # adjust if spatial dims not 1x1

        self.Emodel = nn.Sequential(*layers).to(device)

        # ----- Decoder -----
        layers = []
        layers.append(nn.Linear(outsize, channels))  # output same number of channels
        layers.append(nn.Unflatten(1, (channels, 1, 1)))

        for k in range(5):
            layers.append(nn.ConvTranspose2d(channels, channels * 2, kernel_size=3, stride=2, padding=1, output_padding=1))
            channels = channels * 2
            layers.append(nn.BatchNorm2d(channels))
            layers.append(nn.ReLU(inplace=True))

        self.Dmodel = nn.Sequential(*layers).to(device)

    def encode(self, image):
        return self.Emodel(image)

    def decode(self, vector):
        return self.Dmodel(vector)