NeuralNetworks 0.2.5__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. NeuralNetworks/MLP/MLP.py +64 -0
  2. NeuralNetworks/MLP/_MLP_tools.py +74 -0
  3. NeuralNetworks/MLP/__init__.py +2 -93
  4. NeuralNetworks/Trainer/Trainer.py +98 -0
  5. NeuralNetworks/Trainer/_Trainer_tools.py +155 -0
  6. NeuralNetworks/Trainer/__init__.py +2 -45
  7. NeuralNetworks/VAE/VAE.py +114 -0
  8. NeuralNetworks/VAE/_VAE_tools.py +6 -0
  9. NeuralNetworks/VAE/__init__.py +8 -0
  10. NeuralNetworks/{Dependances/matplot.py → _Dependances/__init__.py} +12 -12
  11. NeuralNetworks/_Dependances/pytorch.py +125 -0
  12. NeuralNetworks/_Dependances/tools.py +15 -0
  13. NeuralNetworks/_UI/Learnings.py +31 -0
  14. NeuralNetworks/_UI/Losses.py +48 -0
  15. NeuralNetworks/{UI → _UI}/__init__.py +1 -1
  16. NeuralNetworks/_UI/_plot.py +50 -0
  17. NeuralNetworks/__init__.py +6 -7
  18. NeuralNetworks/_shared/__init__.py +8 -0
  19. NeuralNetworks/_shared/module.py +115 -0
  20. {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/METADATA +8 -15
  21. neuralnetworks-0.2.8.dist-info/RECORD +24 -0
  22. {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/WHEEL +1 -1
  23. NeuralNetworks/Dependances/__init__.py +0 -75
  24. NeuralNetworks/Dependances/pytorch.py +0 -111
  25. NeuralNetworks/MLP/FourierFeatures.py +0 -89
  26. NeuralNetworks/MLP/Layers.py +0 -31
  27. NeuralNetworks/MLP/inference.py +0 -26
  28. NeuralNetworks/Trainer/dynamic_learning_rate.py +0 -79
  29. NeuralNetworks/Trainer/sample_data.py +0 -19
  30. NeuralNetworks/Trainer/train.py +0 -75
  31. NeuralNetworks/UI/Learnings.py +0 -45
  32. NeuralNetworks/UI/Losses.py +0 -45
  33. neuralnetworks-0.2.5.dist-info/RECORD +0 -20
  34. {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/licenses/LICENSE +0 -0
  35. {neuralnetworks-0.2.5.dist-info → neuralnetworks-0.2.8.dist-info}/top_level.txt +0 -0
@@ -1,5 +1,5 @@
1
1
  # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
- # Copyright (C) 2026 Alexandre Brun
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
3
  # This program is free software: you can redistribute it and/or modify
4
4
  # it under the terms of the GNU General Public License as published by
5
5
  # the Free Software Foundation, either version 3 of the License, or
@@ -7,19 +7,19 @@
7
7
 
8
8
  import matplotlib.pyplot as plt
9
9
  from matplotlib.gridspec import GridSpec
10
+ from matplotlib.ticker import FixedLocator
10
11
 
11
12
  plt.rcParams['figure.facecolor'] = (0,0,0,0)
12
13
  plt.rcParams['axes.facecolor'] = (0,0,0,0)
13
- grey_color = "#888888"
14
+ plt.rcParams['axes.grid'] = True
14
15
 
15
- # Style général du texte et axes
16
- plt.rcParams['text.color'] = grey_color
17
- plt.rcParams['axes.labelcolor'] = grey_color
18
- plt.rcParams['xtick.color'] = grey_color
19
- plt.rcParams['ytick.color'] = grey_color
20
- plt.rcParams['axes.edgecolor'] = grey_color
21
- plt.rcParams['axes.titlecolor'] = grey_color
16
+ from .pytorch import *
17
+ from .tools import *
22
18
 
23
- # Activation de la grille globale
24
- plt.rcParams['axes.grid'] = True
25
- plt.rcParams['grid.color'] = grey_color
19
+ import numpy as np
20
+
21
+ from tqdm.auto import trange, tqdm
22
+
23
+
24
+ import onnx
25
+ from onnxsim import simplify
@@ -0,0 +1,125 @@
1
+ # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+
8
+ import os
9
+
10
+ import platform
11
+
12
+ import torch
13
+ import torch.optim as optim
14
+ import torch.nn as nn
15
+
16
+ from torch.amp import autocast, GradScaler
17
+
18
+ import visualtorch
19
+
20
+ pi2 = 2 * torch.pi
21
+
22
+ def get_best_device ():
23
+
24
+ os_name = platform.system ().lower ()
25
+
26
+ # =========== APPLE SILICON (macOS) ===========
27
+ if os_name == 'darwin':
28
+ if torch.backends.mps.is_available ():
29
+ return torch.device ('mps')
30
+
31
+ # =========== WINDOWS ===========
32
+ if os_name == 'windows':
33
+ # 1) CUDA
34
+ if torch.cuda.is_available ():
35
+ return torch.device ('cuda')
36
+
37
+ # =========== LINUX ===========
38
+ if os_name == 'linux':
39
+ # 1) CUDA (Nvidia)
40
+ if torch.cuda.is_available ():
41
+ return torch.device ('cuda')
42
+ # 2) ROCm (AMD)
43
+ elif hasattr (torch.backends, 'hip') and torch.backends.hip.is_available ():
44
+ return torch.device ('cuda')
45
+
46
+ # 3) Intel oneAPI / XPU
47
+ elif hasattr (torch, 'xpu') and torch.xpu.is_available ():
48
+ return torch.device ('xpu')
49
+
50
+ # =========== Unknown OS ===========
51
+ return torch.device ('cpu')
52
+
53
+ device = get_best_device (); dev = str (device)
54
+ scaler = GradScaler (dev)
55
+
56
+ # --- Optimisations CUDA ---
57
+ # Accélération des convolutions et matmul
58
+ torch.backends.cudnn.enabled = True
59
+ torch.backends.cuda.matmul.allow_tf32 = True
60
+ torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
61
+ torch.backends.cudnn.allow_tf32 = True
62
+
63
+ # Paramètres autograd
64
+ torch._inductor.config.max_autotune = 'max'
65
+ torch.set_float32_matmul_precision('medium')
66
+
67
+ def tensorise(obj):
68
+ return torch.as_tensor(obj, dtype=torch.float32)
69
+
70
+ class Container:
71
+ def __init__ (self, dictionnaire : dict):
72
+ self.D = dictionnaire
73
+
74
+ def __repr__ (self):
75
+ return "\n".join(self.D.keys())
76
+
77
+ def get (self, name : str):
78
+ value = self.D.get (str(name))
79
+ if value is None:
80
+ fall_back = list(self.D.keys ())[0]
81
+ print(f"Warning: '{str(name)}' not recognized, falling back to '{fall_back}'")
82
+ value = self.D.get (fall_back)
83
+ return value
84
+
85
+ norms = Container ({
86
+ 'Relu' : nn.ReLU (),
87
+ 'LeakyRelu': nn.LeakyReLU (),
88
+ 'ELU' : nn.ELU (),
89
+ 'SELU' : nn.SELU (),
90
+ 'GELU' : nn.GELU (),
91
+ 'Mish' : nn.Mish (),
92
+ 'Sigmoid' : nn.Sigmoid (),
93
+ 'Tanh' : nn.Tanh (),
94
+ 'Hardtanh' : nn.Hardtanh (),
95
+ 'Softplus' : nn.Softplus (),
96
+ 'Softsign' : nn.Softsign ()
97
+ })
98
+
99
+ crits = Container ({
100
+ 'MSE' : nn.MSELoss (reduction='none'),
101
+ 'L1' : nn.L1Loss (reduction='none'),
102
+ 'SmoothL1' : nn.SmoothL1Loss (reduction='none'),
103
+ 'SoftMarginLoss' : nn.SoftMarginLoss (reduction='none'),
104
+ 'Huber' : nn.HuberLoss (reduction='none'),
105
+ 'CrossEntropy' : nn.CrossEntropyLoss (reduction='none'),
106
+ 'KLDiv' : nn.KLDivLoss (reduction='none'),
107
+ 'PoissonNLL' : nn.PoissonNLLLoss (reduction='none'),
108
+ 'MultiLabelSoftMargin' : nn.MultiLabelSoftMarginLoss (reduction='none')
109
+ })
110
+
111
+ optims = Container ({
112
+ 'Adam' : optim.Adam ,
113
+ 'Adadelta' : optim.Adadelta ,
114
+ 'Adafactor' : optim.Adafactor,
115
+ 'AdamW' : optim.AdamW ,
116
+ 'Adamax' : optim.Adamax ,
117
+ 'ASGD' : optim.ASGD ,
118
+ 'NAdam' : optim.NAdam ,
119
+ 'RAdam' : optim.RAdam ,
120
+ 'RMSprop' : optim.RMSprop ,
121
+ 'Rprop' : optim.Rprop ,
122
+ 'SGD' : optim.SGD
123
+ })
124
+
125
+ torch.cuda.empty_cache ()
@@ -0,0 +1,15 @@
1
+ # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+
8
+ import torch
9
+ from .pytorch import device
10
+
11
+ pi = torch.pi
12
+ e = torch.e
13
+
14
+ def rglen(liste : list):
15
+ return range(len(liste))
@@ -0,0 +1,31 @@
1
+ # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+
8
+ from .._Dependances import plt, np, FixedLocator
9
+ from ._plot import plot, init_plot
10
+
11
+ def learnings (*nets , #
12
+ fig_size : int = 5 , #
13
+ color : str = "#888888"): #
14
+
15
+ fig, ax = init_plot (fig_size, color)
16
+ all_learnings = [[lr for lr in net.learnings] for net in nets]
17
+
18
+ if max (len (lst) for lst in all_learnings) == 1:
19
+ lenlearnings = 2
20
+ else:
21
+ lenlearnings = max (len (lst) for lst in all_learnings)
22
+
23
+ plt.xlim (1, lenlearnings)
24
+ plt.ylim (10 ** (np.floor (np.log10 (np.min (all_learnings)))),
25
+ 10 ** (np.ceil (np.log10 (np.max (all_learnings)))))
26
+
27
+ for k, net in enumerate (nets):
28
+ ax.plot (np.arange(1, len (all_learnings [k]) + 1),
29
+ all_learnings [k], label = net.name)
30
+
31
+ plot (ax, "Epochs", "Taux d'apprentissage", "")
@@ -0,0 +1,48 @@
1
+ # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+
8
+ from .._Dependances import plt, np, FixedLocator, torch
9
+ from ._plot import plot, init_plot
10
+
11
+ def losses (*nets , #
12
+ fuse_losses : bool = True , #
13
+ names : list = None , #
14
+ fig_size : int = 5 , #
15
+ color : str = "#888888"): #
16
+
17
+ fig, ax = init_plot (fig_size, color)
18
+
19
+ if fuse_losses:
20
+ all_losses = [[np.mean (losses) for losses in net.losses] for net in nets]
21
+ else:
22
+ all_losses = [net.losses for net in nets]
23
+
24
+ if max (len (lst) for lst in all_losses) == 1:
25
+ lenlosses = 2
26
+ else:
27
+ lenlosses = max (len (lst) for lst in all_losses)
28
+
29
+ all_losses = np.array (all_losses)
30
+ plt.xlim (1, lenlosses)
31
+ plt.ylim (10 ** (np.floor (np.log10 (np.min (all_losses)))),
32
+ 10 ** (np.ceil (np.log10 (np.max (all_losses)))))
33
+
34
+ if fuse_losses:
35
+
36
+ for k, net in enumerate (nets):
37
+ ax.plot (np.arange(1, len (all_losses [k]) + 1),
38
+ all_losses[k], label=net.name)
39
+ else:
40
+
41
+ if names is None:
42
+ names = range (all_losses.shape [-1])
43
+ for k, net in enumerate(nets):
44
+ for i in range (all_losses.shape [-1]):
45
+ ax.plot (np.arange (1, len (all_losses [k] [:, i]) + 1),
46
+ all_losses [k] [:, i], label = f"{net.name} : {names [i]}")
47
+
48
+ plot (ax, "Epochs", "Résidus", "")
@@ -1,5 +1,5 @@
1
1
  # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
- # Copyright (C) 2026 Alexandre Brun
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
3
  # This program is free software: you can redistribute it and/or modify
4
4
  # it under the terms of the GNU General Public License as published by
5
5
  # the Free Software Foundation, either version 3 of the License, or
@@ -0,0 +1,50 @@
1
+ # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+
8
+ from .._Dependances import np, plt, FixedLocator
9
+
10
+ def init_plot (fig_size : int = 5, color : str = "#888888"):
11
+
12
+ plt.rcParams['axes.facecolor'] = (0,0,0,0)
13
+ plt.rcParams ['text.color'] = color
14
+ plt.rcParams ['axes.labelcolor'] = color
15
+ plt.rcParams ['xtick.color'] = color
16
+ plt.rcParams ['ytick.color'] = color
17
+ plt.rcParams ['axes.edgecolor'] = color
18
+ plt.rcParams ['axes.titlecolor'] = color
19
+ plt.rcParams ['grid.color'] = color
20
+
21
+ fig, ax = plt.subplots ()
22
+ fig.set_figheight (fig_size)
23
+ fig.set_figwidth (fig_size)
24
+
25
+ return fig, ax
26
+
27
+ def plot (ax,
28
+ x_title : str = "",
29
+ y_title : str = "",
30
+ title : str = ""):
31
+
32
+ plt.title (title)
33
+ ax.set_xlabel (x_title)
34
+ ax.set_ylabel (y_title)
35
+ ax.legend (loc = "upper right", framealpha = 0, edgecolor = 'none')
36
+
37
+ ticks, minors = list (ax.get_xticks ()), []; ticks [0] = 1
38
+ for i in range( len (ticks) - 1):
39
+ minors.extend (np.linspace (ticks [i], ticks [i+1], 4, endpoint = False) [1:])
40
+
41
+ ax.set_xticks (ticks)
42
+ ax.xaxis.set_minor_locator (FixedLocator (minors))
43
+
44
+ ax.grid (which = "major", axis = "y", linewidth = 1.0)
45
+ ax.grid (which = "minor", axis = "y", linestyle = ":", alpha = 0.5)
46
+ plt.yscale ('log', nonpositive='mask')
47
+
48
+ plt.tight_layout ()
49
+ plt.ion ()
50
+ plt.show ()
@@ -1,18 +1,17 @@
1
1
  # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
- # Copyright (C) 2026 Alexandre Brun
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
3
  # This program is free software: you can redistribute it and/or modify
4
4
  # it under the terms of the GNU General Public License as published by
5
5
  # the Free Software Foundation, either version 3 of the License, or
6
6
  # (at your option) any later version.
7
7
 
8
8
  # Import des dépendances et utilitaires globaux (device, settings, tensorise, etc.)
9
- from .Dependances import norms, crits, optims, rglen, device, pi, e, tensorise
9
+ from ._Dependances import norms, crits, optims, rglen, device, pi, e, tensorise
10
+ from ._UI import losses, learnings
10
11
 
11
- # Modèle MLP principal + fonction d'entraînement associée
12
+ from ._shared import Module
12
13
  from .MLP import MLP
13
-
14
+ from .VAE import VAE
14
15
  from .Trainer import Trainer
15
16
 
16
- from .UI import *
17
-
18
- __version__ = "0.2.5"
17
+ __version__ = "0.2.8"
@@ -0,0 +1,8 @@
1
+ # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+
8
+ from .module import Module
@@ -0,0 +1,115 @@
1
+ # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
+ # Copyright (C) 2025 - 2026 Alexandre Brun
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+
8
+ from .._Dependances import nn, torch, device, np, onnx, simplify
9
+
10
+ class Module (nn.Module):
11
+ """
12
+ Classe de base pour les modèles.
13
+ Gère le forward entraînement, l'inférence et le suivi des métriques.
14
+ """
15
+
16
+ def __init__ (self,
17
+ _name : str = "Net",
18
+ **Reconstruction_data): # Nom du modèle
19
+ """
20
+ Initialise le module.
21
+ """
22
+ super ().__init__ ()
23
+ self.losses = [] # Tracker des résidus
24
+ self.learnings = [] # Tracker des lrs
25
+ self.name = _name # Nom du module
26
+ self.Reconstruction_data = Reconstruction_data # Données de reconstruction
27
+
28
+
29
+ def _forward (self, x : torch.Tensor):
30
+ """
31
+ Forward interne à implémenter dans les classes filles.
32
+
33
+ Utile pour VAEs :
34
+ Si la sortie d'entrainement est différente de la sortie d'utilisation,
35
+ alors redéfinir train_forward pour l'entrainement.
36
+ """
37
+ raise Exception ("_forward n'est pas défini dans la classe")
38
+
39
+ def train_forward (self, x : torch.Tensor): # Forward de train (avec gradients)
40
+ return self._forward (x)
41
+
42
+ def forward(self, x: torch.Tensor): # Forward d'inférence (sans gradients)
43
+ device = next (self.parameters ()).device
44
+
45
+ with torch.no_grad ():
46
+ x = x.unsqueeze (0) if x.dim () == 1 else x
47
+ x = x.to (device)
48
+ output = self._forward (x)
49
+ return output.cpu ().numpy ().flatten ()
50
+
51
+ @property
52
+ def nb_params (self):
53
+ """
54
+ Affiche le nombre total de paramètres et ceux entraînables.
55
+ """
56
+ total = sum (p.numel () for p in self.parameters ())
57
+ trainable = sum (p.numel () for p in self.parameters () if p.requires_grad)
58
+ print (f"Nombre total de paramètres : {total}")
59
+ print (f"Nombre de paramètres entraînables : {trainable}")
60
+
61
+ @property
62
+ def save(self):
63
+ state_dict = {
64
+ k: v.detach().cpu().numpy()
65
+ for k, v in self.state_dict().items()
66
+ }
67
+ np.savez(f"{self.name}.npz", allow_pickle = False, # Initialise l'enregistement
68
+ **state_dict , # Données d'état
69
+ losses = np.asarray(self.losses) , # Données de loss
70
+ learnings = np.asarray(self.learnings) , # Données de lr
71
+ **self.Reconstruction_data ) # Données de reconstruction
72
+
73
+ @classmethod
74
+ def load(cls, path, device="cpu"):
75
+ data = np.load(path, allow_pickle = False) # Lis les données
76
+ index = list(data.keys()).index('losses') # Indice de séparation
77
+
78
+ obj = cls (**{k: data[k] for k in list(data.keys())[index + 2:]})
79
+
80
+ state_dict = {
81
+ k: torch.from_numpy(data[k]).to(device)
82
+ for k in obj.state_dict().keys()
83
+ }
84
+
85
+ obj.losses = data ["losses"].tolist( ) # Charge les données de loss
86
+ obj.learnings = data ["learnings"].tolist( ) # Charge les données de lr
87
+ obj.load_state_dict (state_dict) # Charge le réseau
88
+
89
+ return obj
90
+
91
+ @property
92
+ def _dummy_input(self):
93
+ raise Exception ("_dummy_input n'est pas défini dans la classe")
94
+
95
+ @property
96
+ def onnx_save (self):
97
+ print ("Sauvegarde en format .onnx")
98
+ torch.onnx.export (self,
99
+ self._dummy_input, # Renseigne la input shape
100
+ f"{self.name}.onnx", # Nom du fichier de sauvegarde
101
+ opset_version = 18, #
102
+ dynamo = True, #
103
+ input_names = ["inputs"], # Nom des inputs
104
+ output_names = ["outputs"], # Nom des outputs
105
+ dynamic_shapes = {
106
+ "x": {0: torch.export.Dim("batch_size")} #
107
+ }
108
+ )
109
+
110
+ print ("Simplification du .onnx")
111
+ onnx_model_simp, check = simplify (onnx.load (f"{self.name}.onnx"))
112
+ assert check
113
+
114
+ onnx.save (onnx_model_simp, f"{self.name}_simplified.onnx")
115
+ print ("Fin de l'enregistrement")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: NeuralNetworks
3
- Version: 0.2.5
3
+ Version: 0.2.8
4
4
  Summary: Multi-Layer Perceptrons with multi-Fourier encoding, variable learning rate, visualization and PyTorch compilation
5
5
  Author-email: Alexandre Brun <alexandre51160@gmail.com>
6
6
  License: GPL-3.0-or-later
@@ -12,20 +12,13 @@ Classifier: Operating System :: OS Independent
12
12
  Requires-Python: >=3.9
13
13
  Description-Content-Type: text/markdown
14
14
  License-File: LICENSE
15
- Requires-Dist: numpy>=1.25
16
- Requires-Dist: matplotlib>=3.10
17
- Requires-Dist: tqdm>=4.66
18
- Requires-Dist: torch<3.0,>=2.9.1
19
- Requires-Dist: torchvision<1.0,>=0.24
20
- Requires-Dist: torchaudio<3.0,>=2.9
21
- Requires-Dist: torchmetrics>=1.8
22
- Requires-Dist: visualtorch>=0.2
23
- Requires-Dist: random-fourier-features-pytorch>=1.0
24
- Requires-Dist: IPython>=8.16
25
- Requires-Dist: requests
26
- Requires-Dist: airfrans
27
- Requires-Dist: scipy
28
- Requires-Dist: pandas
15
+ Requires-Dist: torch
16
+ Requires-Dist: numpy
17
+ Requires-Dist: matplotlib
18
+ Requires-Dist: onnx
19
+ Requires-Dist: onnx-simplifier
20
+ Requires-Dist: tqdm
21
+ Requires-Dist: visualtorch
29
22
  Dynamic: license-file
30
23
 
31
24
  # NeuralNetworks Module
@@ -0,0 +1,24 @@
1
+ NeuralNetworks/__init__.py,sha256=FPSrB6FO7PmkWDgNhtohqAzWrSFUMTtl3GHrlRuO6n4,679
2
+ NeuralNetworks/MLP/MLP.py,sha256=x6tpk-0Am628sOF1Bw6w4dcLCWe0uxDqCL4sMPNG1Qs,3333
3
+ NeuralNetworks/MLP/_MLP_tools.py,sha256=gzOd--UPWHtV-ilBHl7PEVZxIP-1LGDQkS3gzsH6ipA,2590
4
+ NeuralNetworks/MLP/__init__.py,sha256=mRd_O5i1o_xnMh0gHM1FYSWYlv2F_ghPCWntMc_6N88,377
5
+ NeuralNetworks/Trainer/Trainer.py,sha256=8D4kiXBzk2jF00lAX-zyRL3rTgT_C-cN5mCZQrwq4PQ,5673
6
+ NeuralNetworks/Trainer/_Trainer_tools.py,sha256=cCgxcKjYPhsMV1ByfPY7iiBNONQ0iVktzKuLq834RpM,5645
7
+ NeuralNetworks/Trainer/__init__.py,sha256=5xvMMe7B96sfPEvdEVMOIC_hgKwOrdsIiE7KdtCMLtM,385
8
+ NeuralNetworks/VAE/VAE.py,sha256=hKh2rJX-If-N41pZXZXur8az0GAaCSbpYNfa5Aq-u4w,4458
9
+ NeuralNetworks/VAE/_VAE_tools.py,sha256=sR1nUm2DAdCpRt07vo8TZ9OEzvnnsbHETvVN3BrtLTE,355
10
+ NeuralNetworks/VAE/__init__.py,sha256=O8Aa2CldKTWPQKuC6-597aEiE3I9wAmEias4Yuuaars,386
11
+ NeuralNetworks/_Dependances/__init__.py,sha256=FD77Mr309XidFBHBdaUywt1mFvMlggbta5lZbTXkQ4w,741
12
+ NeuralNetworks/_Dependances/pytorch.py,sha256=wYJ4GymdEDWiD_se3IPlSakOYrQIqGBkUSjH49VS8Sg,3996
13
+ NeuralNetworks/_Dependances/tools.py,sha256=UoqyLVzgdKJcXk5lJzXjU7Z9ksMXVObVpSwAbuiezjQ,479
14
+ NeuralNetworks/_UI/Learnings.py,sha256=uizcl2D4nWGM_IBeAbbotSUOfnSZKBlN54gdx6mEUCc,1181
15
+ NeuralNetworks/_UI/Losses.py,sha256=nepuUWLaO1OmBBsy6Yv-YZsjva_-dHh4zR4y2NQSt0s,1731
16
+ NeuralNetworks/_UI/__init__.py,sha256=1RW0P85U59OUnEguD3Z8jrUX1uaF3xp0KgVwL-sy0l8,416
17
+ NeuralNetworks/_UI/_plot.py,sha256=x-KGr6VljIKcQ84XdBI7Ft63eNseHorg56K3ELGlecU,1772
18
+ NeuralNetworks/_shared/__init__.py,sha256=bShz5M41mC9Rz_tGex5W_4v0iFi0YFxgSo5x2gLRxKs,383
19
+ NeuralNetworks/_shared/module.py,sha256=hU6odU1syrzi3eMYG7xNchIhVaUDvX13-HIr9p4APoM,4648
20
+ neuralnetworks-0.2.8.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
21
+ neuralnetworks-0.2.8.dist-info/METADATA,sha256=SIC3XH77VIVO8OehohtmfA57naZ90lCD_-z41XqhwuE,18086
22
+ neuralnetworks-0.2.8.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
23
+ neuralnetworks-0.2.8.dist-info/top_level.txt,sha256=h18nmC1BX7avyAAwKh0OQWezxgXmOpmVtbFq-8Mcbms,15
24
+ neuralnetworks-0.2.8.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,75 +0,0 @@
1
- # NeuralNetworks - Multi-Layer Perceptrons avec encodage Fourier
2
- # Copyright (C) 2026 Alexandre Brun
3
- # This program is free software: you can redistribute it and/or modify
4
- # it under the terms of the GNU General Public License as published by
5
- # the Free Software Foundation, either version 3 of the License, or
6
- # (at your option) any later version.
7
-
8
- from .matplot import *
9
- from .pytorch import *
10
-
11
- import numpy as np
12
- from PIL import Image
13
-
14
- import copy
15
- import subprocess
16
- import requests
17
- from io import BytesIO
18
- from tqdm import tqdm
19
- import plotly.graph_objects as go
20
- from IPython.display import display, clear_output
21
-
22
- from scipy.interpolate import griddata
23
- from sklearn.model_selection import train_test_split
24
-
25
- import math
26
- pi = math.pi
27
- e = math.e
28
-
29
- norms = lambda: print("""
30
- "Relu"
31
- "LeakyRelu"
32
- "ELU"
33
- "SELU"
34
- "GELU"
35
- "Mish"
36
- "Sigmoid"
37
- "Tanh"
38
- "Hardtanh"
39
- "Softplus"
40
- "Softsign"
41
- """
42
- )
43
-
44
- crits = lambda: print("""
45
- "MSE"
46
- "L1"
47
- "SmoothL1"
48
- "Huber"
49
- "CrossEntropy"
50
- "KLDiv"
51
- "PoissonNLL"
52
- "MultiLabelSoftMargin"
53
- """
54
- )
55
-
56
- optims = lambda: print("""
57
- "Adadelta"
58
- "Adafactor"
59
- "Adam"
60
- "AdamW"
61
- "Adamax"
62
- "ASGD"
63
- "NAdam"
64
- "RAdam"
65
- "RMSprop"
66
- "Rprop"
67
- "SGD"
68
- """
69
- )
70
-
71
- def rglen(list):
72
- return range(len(list))
73
-
74
- def fPrintDoc(obj):
75
- return lambda: print(obj.__doc__)