MEDfl 0.2.1 → 2.0.1 (py3-none-any.whl)

This diff compares the contents of two package versions as they were publicly released to their registry. It is provided for informational purposes only.
Files changed (55)
  1. MEDfl/LearningManager/__init__.py +13 -13
  2. MEDfl/LearningManager/client.py +150 -181
  3. MEDfl/LearningManager/dynamicModal.py +287 -287
  4. MEDfl/LearningManager/federated_dataset.py +60 -60
  5. MEDfl/LearningManager/flpipeline.py +192 -192
  6. MEDfl/LearningManager/model.py +223 -223
  7. MEDfl/LearningManager/params.yaml +14 -14
  8. MEDfl/LearningManager/params_optimiser.py +442 -442
  9. MEDfl/LearningManager/plot.py +229 -229
  10. MEDfl/LearningManager/server.py +181 -189
  11. MEDfl/LearningManager/strategy.py +82 -138
  12. MEDfl/LearningManager/utils.py +331 -331
  13. MEDfl/NetManager/__init__.py +10 -10
  14. MEDfl/NetManager/database_connector.py +43 -43
  15. MEDfl/NetManager/dataset.py +92 -92
  16. MEDfl/NetManager/flsetup.py +320 -320
  17. MEDfl/NetManager/net_helper.py +254 -254
  18. MEDfl/NetManager/net_manager_queries.py +142 -142
  19. MEDfl/NetManager/network.py +194 -194
  20. MEDfl/NetManager/node.py +184 -184
  21. MEDfl/__init__.py +4 -3
  22. MEDfl/scripts/__init__.py +1 -1
  23. MEDfl/scripts/base.py +29 -29
  24. MEDfl/scripts/create_db.py +126 -126
  25. Medfl/LearningManager/__init__.py +13 -0
  26. Medfl/LearningManager/client.py +150 -0
  27. Medfl/LearningManager/dynamicModal.py +287 -0
  28. Medfl/LearningManager/federated_dataset.py +60 -0
  29. Medfl/LearningManager/flpipeline.py +192 -0
  30. Medfl/LearningManager/model.py +223 -0
  31. Medfl/LearningManager/params.yaml +14 -0
  32. Medfl/LearningManager/params_optimiser.py +442 -0
  33. Medfl/LearningManager/plot.py +229 -0
  34. Medfl/LearningManager/server.py +181 -0
  35. Medfl/LearningManager/strategy.py +82 -0
  36. Medfl/LearningManager/utils.py +331 -0
  37. Medfl/NetManager/__init__.py +10 -0
  38. Medfl/NetManager/database_connector.py +43 -0
  39. Medfl/NetManager/dataset.py +92 -0
  40. Medfl/NetManager/flsetup.py +320 -0
  41. Medfl/NetManager/net_helper.py +254 -0
  42. Medfl/NetManager/net_manager_queries.py +142 -0
  43. Medfl/NetManager/network.py +194 -0
  44. Medfl/NetManager/node.py +184 -0
  45. Medfl/__init__.py +3 -0
  46. Medfl/scripts/__init__.py +2 -0
  47. Medfl/scripts/base.py +30 -0
  48. Medfl/scripts/create_db.py +126 -0
  49. alembic/env.py +61 -61
  50. {MEDfl-0.2.1.dist-info → medfl-2.0.1.dist-info}/METADATA +120 -108
  51. medfl-2.0.1.dist-info/RECORD +55 -0
  52. {MEDfl-0.2.1.dist-info → medfl-2.0.1.dist-info}/WHEEL +1 -1
  53. {MEDfl-0.2.1.dist-info → medfl-2.0.1.dist-info/licenses}/LICENSE +674 -674
  54. MEDfl-0.2.1.dist-info/RECORD +0 -31
  55. {MEDfl-0.2.1.dist-info → medfl-2.0.1.dist-info}/top_level.txt +0 -0
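To sanity-check a listing like the one above locally, here is a minimal sketch using only the Python standard library; the .whl filenames are assumptions inferred from the versions in the title, and both wheels must first be downloaded (for example with pip download MEDfl==0.2.1 --no-deps and pip download medfl==2.0.1 --no-deps):

# Hypothetical local check: compare the file listings of the two wheels.
# The .whl filenames below are assumed, not taken from the registry.
import zipfile

old = set(zipfile.ZipFile("MEDfl-0.2.1-py3-none-any.whl").namelist())
new = set(zipfile.ZipFile("medfl-2.0.1-py3-none-any.whl").namelist())

print("only in 0.2.1:", sorted(old - new))  # e.g. the old dist-info entries
print("only in 2.0.1:", sorted(new - old))  # e.g. the new Medfl/ entries
print("in both:", len(old & new))

As the table shows, the 2.0.1 wheel appears to add a parallel copy of the package under the Medfl/ casing (items 25 to 48, all +N -0) alongside the original MEDfl/ tree.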
MEDfl/LearningManager/model.py
@@ -1,223 +1,223 @@
-#!/usr/bin/env python3
-# froked from https://github.com/pythonlessons/mltu/blob/main/mltu/torch/model.py
-
-import typing
-from collections import OrderedDict
-from typing import Dict, List, Optional, Tuple
-
-import numpy as np
-import torch
-import torch.nn as nn
-from sklearn.metrics import accuracy_score,roc_auc_score
-
-from .utils import params
-
-
-class Model:
-    """
-    Model class for training and testing PyTorch neural networks.
-
-    Attributes:
-        model (torch.nn.Module): PyTorch neural network.
-        optimizer (torch.optim.Optimizer): PyTorch optimizer.
-        criterion (typing.Callable): Loss function.
-    """
-
-    def __init__(
-        self,
-        model: torch.nn.Module,
-        optimizer: torch.optim.Optimizer,
-        criterion: typing.Callable,
-    ) -> None:
-        """
-        Initialize Model class with the specified model, optimizer, and criterion.
-
-        Args:
-            model (torch.nn.Module): PyTorch neural network.
-            optimizer (torch.optim.Optimizer): PyTorch optimizer.
-            criterion (typing.Callable): Loss function.
-        """
-        self.model = model
-        self.optimizer = optimizer
-        self.criterion = criterion
-        # Get device on which model is running
-        self.validate()
-
-    def validate(self) -> None:
-        """
-        Validate model and optimizer.
-        """
-        if not isinstance(self.model, torch.nn.Module):
-            raise TypeError("model argument must be a torch.nn.Module")
-
-        if not isinstance(self.optimizer, torch.optim.Optimizer):
-            raise TypeError(
-                "optimizer argument must be a torch.optim.Optimizer"
-            )
-
-    def get_parameters(self) -> List[np.ndarray]:
-        """
-        Get the parameters of the model as a list of NumPy arrays.
-
-        Returns:
-            List[np.ndarray]: The parameters of the model as a list of NumPy arrays.
-        """
-        return [
-            val.cpu().numpy() for _, val in self.model.state_dict().items()
-        ]
-
-    def set_parameters(self, parameters: List[np.ndarray]) -> None:
-        """
-        Set the parameters of the model from a list of NumPy arrays.
-
-        Args:
-            parameters (List[np.ndarray]): The parameters to be set.
-        """
-        params_dict = zip(self.model.state_dict().keys(), parameters)
-        state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
-        self.model.load_state_dict(state_dict, strict=True)
-
-    def train(
-        self, train_loader, epoch, device, privacy_engine, diff_priv=False
-    ) -> float:
-        """
-        Train the model on the given train_loader for one epoch.
-
-        Args:
-            train_loader: The data loader for training data.
-            epoch (int): The current epoch number.
-            device: The device on which to perform the training.
-            privacy_engine: The privacy engine used for differential privacy (if enabled).
-            diff_priv (bool, optional): Whether differential privacy is used. Default is False.
-
-        Returns:
-            float: The value of epsilon used in differential privacy.
-        """
-        self.model.train()
-        epsilon = 0
-        losses = []
-        top1_acc = []
-
-        for i, (X_train, y_train) in enumerate(train_loader):
-            X_train, y_train = X_train.to(device), y_train.to(device)
-
-            self.optimizer.zero_grad()
-
-            # compute output
-            y_hat = torch.squeeze(self.model(X_train), 1)
-            loss = self.criterion(y_hat, y_train)
-
-            preds = np.argmax(y_hat.detach().cpu().numpy(), axis=0)
-            labels = y_train.detach().cpu().numpy()
-
-            # measure accuracy and record loss
-            acc = (preds == labels).mean()
-
-            losses.append(loss.item())
-            top1_acc.append(acc)
-
-            loss.backward()
-            self.optimizer.step()
-
-            if diff_priv:
-                epsilon = privacy_engine.get_epsilon(float(params["DELTA"]))
-
-            if (i + 1) % 10 == 0:
-                if diff_priv:
-                    epsilon = privacy_engine.get_epsilon(float(params["DELTA"]))
-                    print(
-                        f"\tTrain Epoch: {epoch} \t"
-                        f"Loss: {np.mean(losses):.6f} "
-                        f"Acc@1: {np.mean(top1_acc) * 100:.6f} "
-                        f"(ε = {epsilon:.2f}, δ = {params['DELTA']})"
-                    )
-                else:
-                    print(
-                        f"\tTrain Epoch: {epoch} \t"
-                        f"Loss: {np.mean(losses):.6f} "
-                        f"Acc@1: {np.mean(top1_acc) * 100:.6f}"
-                    )
-
-        return epsilon
-
-    def evaluate(self, val_loader, device=torch.device("cpu")) -> Tuple[float, float]:
-        """
-        Evaluate the model on the given validation data.
-
-        Args:
-            val_loader: The data loader for validation data.
-            device: The device on which to perform the evaluation. Default is 'cpu'.
-
-        Returns:
-            Tuple[float, float]: The evaluation loss and accuracy.
-        """
-        correct, total, loss, accuracy, auc = 0, 0, 0.0, [], []
-        self.model.eval()
-
-        with torch.no_grad():
-            for X_test, y_test in val_loader:
-                X_test, y_test = X_test.to(device), y_test.to(device)  # Move data to device
-
-                y_hat = torch.squeeze(self.model(X_test), 1)
-
-
-                criterion = self.criterion.to(y_hat.device)
-                loss += criterion(y_hat, y_test).item()
-
-
-                # Move y_hat to CPU for accuracy computation
-                y_hat_cpu = y_hat.cpu().detach().numpy()
-                accuracy.append(accuracy_score(y_test.cpu().numpy(), y_hat_cpu.round()))
-
-                # Move y_test to CPU for AUC computation
-                y_test_cpu = y_test.cpu().numpy()
-                y_prob_cpu = y_hat.cpu().detach().numpy()
-                if (len(np.unique(y_test_cpu)) != 1):
-                    auc.append(roc_auc_score(y_test_cpu, y_prob_cpu))
-
-                total += y_test.size(0)
-                correct += np.sum(y_hat_cpu.round() == y_test_cpu)
-
-        loss /= len(val_loader.dataset)
-        return loss, np.mean(accuracy), np.mean(auc)
-
-
-    @staticmethod
-    def save_model(model , model_name:str):
-        """
-        Saves a PyTorch model to a file.
-
-        Args:
-            model (torch.nn.Module): PyTorch model to be saved.
-            model_name (str): Name of the model file.
-
-        Raises:
-            Exception: If there is an issue during the saving process.
-
-        Returns:
-            None
-        """
-        try:
-            torch.save(model, '../../notebooks/.ipynb_checkpoints/trainedModels/' + model_name + ".pth")
-        except Exception as e:
-            raise Exception(f"Error saving the model: {str(e)}")
-
-    @staticmethod
-    def load_model(model_path: str):
-        """
-        Loads a PyTorch model from a file.
-
-        Args:
-            model_path (str): Path to the model file to be loaded.

-        Returns:
-            torch.nn.Module: Loaded PyTorch model.
-        """
-        # Ensure models are loaded onto the CPU when CUDA is not available
-        if torch.cuda.is_available():
-            loaded_model = torch.load(model_path)
-        else:
-            loaded_model = torch.load(model_path, map_location=torch.device('cpu'))
-        return loaded_model
-
-
+#!/usr/bin/env python3
+# froked from https://github.com/pythonlessons/mltu/blob/main/mltu/torch/model.py
+
+import typing
+from collections import OrderedDict
+from typing import Dict, List, Optional, Tuple
+
+import numpy as np
+import torch
+import torch.nn as nn
+from sklearn.metrics import accuracy_score,roc_auc_score
+
+from .utils import params
+
+
+class Model:
+    """
+    Model class for training and testing PyTorch neural networks.
+
+    Attributes:
+        model (torch.nn.Module): PyTorch neural network.
+        optimizer (torch.optim.Optimizer): PyTorch optimizer.
+        criterion (typing.Callable): Loss function.
+    """
+
+    def __init__(
+        self,
+        model: torch.nn.Module,
+        optimizer: torch.optim.Optimizer,
+        criterion: typing.Callable,
+    ) -> None:
+        """
+        Initialize Model class with the specified model, optimizer, and criterion.
+
+        Args:
+            model (torch.nn.Module): PyTorch neural network.
+            optimizer (torch.optim.Optimizer): PyTorch optimizer.
+            criterion (typing.Callable): Loss function.
+        """
+        self.model = model
+        self.optimizer = optimizer
+        self.criterion = criterion
+        # Get device on which model is running
+        self.validate()
+
+    def validate(self) -> None:
+        """
+        Validate model and optimizer.
+        """
+        if not isinstance(self.model, torch.nn.Module):
+            raise TypeError("model argument must be a torch.nn.Module")
+
+        if not isinstance(self.optimizer, torch.optim.Optimizer):
+            raise TypeError(
+                "optimizer argument must be a torch.optim.Optimizer"
+            )
+
+    def get_parameters(self) -> List[np.ndarray]:
+        """
+        Get the parameters of the model as a list of NumPy arrays.
+
+        Returns:
+            List[np.ndarray]: The parameters of the model as a list of NumPy arrays.
+        """
+        return [
+            val.cpu().numpy() for _, val in self.model.state_dict().items()
+        ]
+
+    def set_parameters(self, parameters: List[np.ndarray]) -> None:
+        """
+        Set the parameters of the model from a list of NumPy arrays.
+
+        Args:
+            parameters (List[np.ndarray]): The parameters to be set.
+        """
+        params_dict = zip(self.model.state_dict().keys(), parameters)
+        state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
+        self.model.load_state_dict(state_dict, strict=True)
+
+    def train(
+        self, train_loader, epoch, device, privacy_engine, diff_priv=False
+    ) -> float:
+        """
+        Train the model on the given train_loader for one epoch.
+
+        Args:
+            train_loader: The data loader for training data.
+            epoch (int): The current epoch number.
+            device: The device on which to perform the training.
+            privacy_engine: The privacy engine used for differential privacy (if enabled).
+            diff_priv (bool, optional): Whether differential privacy is used. Default is False.
+
+        Returns:
+            float: The value of epsilon used in differential privacy.
+        """
+        self.model.train()
+        epsilon = 0
+        losses = []
+        top1_acc = []
+
+        for i, (X_train, y_train) in enumerate(train_loader):
+            X_train, y_train = X_train.to(device), y_train.to(device)
+
+            self.optimizer.zero_grad()
+
+            # compute output
+            y_hat = torch.squeeze(self.model(X_train), 1)
+            loss = self.criterion(y_hat, y_train)
+
+            preds = np.argmax(y_hat.detach().cpu().numpy(), axis=0)
+            labels = y_train.detach().cpu().numpy()
+
+            # measure accuracy and record loss
+            acc = (preds == labels).mean()
+
+            losses.append(loss.item())
+            top1_acc.append(acc)
+
+            loss.backward()
+            self.optimizer.step()
+
+            if diff_priv:
+                epsilon = privacy_engine.get_epsilon(float(params["DELTA"]))
+
+            if (i + 1) % 10 == 0:
+                if diff_priv:
+                    epsilon = privacy_engine.get_epsilon(float(params["DELTA"]))
+                    print(
+                        f"\tTrain Epoch: {epoch} \t"
+                        f"Loss: {np.mean(losses):.6f} "
+                        f"Acc@1: {np.mean(top1_acc) * 100:.6f} "
+                        f"(ε = {epsilon:.2f}, δ = {params['DELTA']})"
+                    )
+                else:
+                    print(
+                        f"\tTrain Epoch: {epoch} \t"
+                        f"Loss: {np.mean(losses):.6f} "
+                        f"Acc@1: {np.mean(top1_acc) * 100:.6f}"
+                    )
+
+        return epsilon
+
+    def evaluate(self, val_loader, device=torch.device("cpu")) -> Tuple[float, float]:
+        """
+        Evaluate the model on the given validation data.
+
+        Args:
+            val_loader: The data loader for validation data.
+            device: The device on which to perform the evaluation. Default is 'cpu'.
+
+        Returns:
+            Tuple[float, float]: The evaluation loss and accuracy.
+        """
+        correct, total, loss, accuracy, auc = 0, 0, 0.0, [], []
+        self.model.eval()
+
+        with torch.no_grad():
+            for X_test, y_test in val_loader:
+                X_test, y_test = X_test.to(device), y_test.to(device)  # Move data to device
+
+                y_hat = torch.squeeze(self.model(X_test), 1)
+
+
+                criterion = self.criterion.to(y_hat.device)
+                loss += criterion(y_hat, y_test).item()
+
+
+                # Move y_hat to CPU for accuracy computation
+                y_hat_cpu = y_hat.cpu().detach().numpy()
+                accuracy.append(accuracy_score(y_test.cpu().numpy(), y_hat_cpu.round()))
+
+                # Move y_test to CPU for AUC computation
+                y_test_cpu = y_test.cpu().numpy()
+                y_prob_cpu = y_hat.cpu().detach().numpy()
+                if (len(np.unique(y_test_cpu)) != 1):
+                    auc.append(roc_auc_score(y_test_cpu, y_prob_cpu))
+
+                total += y_test.size(0)
+                correct += np.sum(y_hat_cpu.round() == y_test_cpu)
+
+        loss /= len(val_loader.dataset)
+        return loss, np.mean(accuracy), np.mean(auc)
+
+
+    @staticmethod
+    def save_model(model , model_name:str):
+        """
+        Saves a PyTorch model to a file.
+
+        Args:
+            model (torch.nn.Module): PyTorch model to be saved.
+            model_name (str): Name of the model file.
+
+        Raises:
+            Exception: If there is an issue during the saving process.
+
+        Returns:
+            None
+        """
+        try:
+            torch.save(model, '../../notebooks/.ipynb_checkpoints/trainedModels/' + model_name + ".pth")
+        except Exception as e:
+            raise Exception(f"Error saving the model: {str(e)}")
+
+    @staticmethod
+    def load_model(model_path: str):
+        """
+        Loads a PyTorch model from a file.
+
+        Args:
+            model_path (str): Path to the model file to be loaded.
+
+        Returns:
+            torch.nn.Module: Loaded PyTorch model.
+        """
+        # Ensure models are loaded onto the CPU when CUDA is not available
+        if torch.cuda.is_available():
+            loaded_model = torch.load(model_path)
+        else:
+            loaded_model = torch.load(model_path, map_location=torch.device('cpu'))
+        return loaded_model
+
+
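In this hunk, get_parameters and set_parameters implement the NumPy-based weight exchange used by federated-learning frameworks such as Flower (presumably consumed by the client.py and server.py files listed above), and train only queries epsilon when a privacy engine is supplied. A minimal end-to-end sketch follows; the toy network, synthetic data, and Opacus wiring are illustrative assumptions, not MEDfl's own setup code:

# Illustrative sketch only: drive Model with a toy binary classifier.
# The network, data, and DP settings are assumptions for demonstration.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from opacus import PrivacyEngine
from MEDfl.LearningManager.model import Model

X = torch.randn(64, 8)                  # 64 samples, 8 features
y = torch.randint(0, 2, (64,)).float()  # binary labels
train_loader = DataLoader(TensorDataset(X, y), batch_size=32)

net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
optimizer = torch.optim.SGD(net.parameters(), lr=0.001)  # lr as in params.yaml
criterion = nn.BCEWithLogitsLoss()

# Opacus wraps the module, optimizer, and loader for DP-SGD; get_epsilon on
# this engine is what Model.train calls when diff_priv=True.
privacy_engine = PrivacyEngine()
net, optimizer, train_loader = privacy_engine.make_private(
    module=net,
    optimizer=optimizer,
    data_loader=train_loader,
    noise_multiplier=1.0,
    max_grad_norm=1.0,  # mirrors MAX_GRAD_NORM in params.yaml
)

model = Model(net, optimizer, criterion)

# Flower-style round trip: export weights as NumPy arrays, load them back.
weights = model.get_parameters()
model.set_parameters(weights)

epsilon = model.train(train_loader, epoch=1, device=torch.device("cpu"),
                      privacy_engine=privacy_engine, diff_priv=True)

Note that evaluate actually returns three values (loss, mean accuracy, mean AUC) even though its annotation still says Tuple[float, float], so callers should unpack three.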
MEDfl/LearningManager/params.yaml
@@ -1,14 +1,14 @@
-DELTA: 1.0e-05
-EPSILON: 5.0
-MAX_GRAD_NORM: 1.0
-diff_privacy: true
-lr: 0.001
-min_evalclient: 2
-num_rounds: 20
-optimizer: SGD
-path_to_master_csv: /home/local/USHERBROOKE/saho6810/MEDfl/code/MEDfl/notebooks/data/masterDataSet/miniDiabete.csv
-path_to_test_csv: /home/local/USHERBROOKE/saho6810/MEDfl/code/MEDfl/notebooks/data/masterDataSet/Mimic_train.csv
-task: BinaryClassification
-test_batch_size: 1
-train_batch_size: 32
-train_epochs: 20
+DELTA: 1.0e-05
+EPSILON: 5.0
+MAX_GRAD_NORM: 1.0
+diff_privacy: true
+lr: 0.001
+min_evalclient: 2
+num_rounds: 20
+optimizer: SGD
+path_to_master_csv: /home/local/USHERBROOKE/saho6810/MEDfl/code/MEDfl/notebooks/data/masterDataSet/miniDiabete.csv
+path_to_test_csv: /home/local/USHERBROOKE/saho6810/MEDfl/code/MEDfl/notebooks/data/masterDataSet/Mimic_train.csv
+task: BinaryClassification
+test_batch_size: 1
+train_batch_size: 32
+train_epochs: 20
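This is the params.yaml that model.py reads via from .utils import params; the DELTA value, for instance, feeds privacy_engine.get_epsilon during DP training. A minimal sketch of loading such a file, shown here with plain PyYAML for illustration (MEDfl's own loader, the params object imported in model.py, lives in MEDfl/LearningManager/utils.py):

# Illustrative: load a params.yaml like the one above with PyYAML.
import yaml

with open("params.yaml") as f:
    params = yaml.safe_load(f)

delta = float(params["DELTA"])  # 1e-05, the δ used when querying ε
rounds = params["num_rounds"]   # 20 federated rounds
print(params["task"], delta, rounds)  # BinaryClassification 1e-05 20

Note that path_to_master_csv and path_to_test_csv are absolute paths into a developer's home directory, so they must be overridden in any other environment.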