MEDfl 0.1.6__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,243 +1,264 @@
1
- from sklearn.preprocessing import LabelEncoder
2
- from sklearn.impute import SimpleImputer
3
-
4
- from sqlalchemy import text
5
-
6
- import torch
7
- import pandas as pd
8
- from torch.utils.data import TensorDataset
9
- import numpy as np
10
-
11
-
12
- from scripts.base import my_eng
13
-
14
-
15
- def is_str(data_df, row, x):
16
- """
17
- Check if a column in a DataFrame is of type 'object' and convert the value accordingly.
18
-
19
- Args:
20
- data_df (pandas.DataFrame): DataFrame containing the data.
21
- row (pandas.Series): Data row.
22
- x (str): Column name.
23
-
24
- Returns:
25
- str or float: Processed value based on the column type.
26
- """
27
- if data_df[x].dtype == "object":
28
- x = f"'{row[x]}'"
29
- else:
30
- x = row[x]
31
- return x
32
-
33
-
34
- def process_eicu(data_df):
35
- """
36
- Process eICU data by filling missing values with mean and replacing NaNs with 'Unknown'.
37
-
38
- Args:
39
- data_df (pandas.DataFrame): Input data.
40
-
41
- Returns:
42
- pandas.DataFrame: Processed data.
43
- """
44
- # Identify numeric and non-numeric columns
45
- numeric_columns = data_df.select_dtypes(include=[np.number]).columns
46
- non_numeric_columns = data_df.select_dtypes(exclude=[np.number]).columns
47
-
48
- # Fill NaN in numeric columns with mean
49
- data_df[numeric_columns] = data_df[numeric_columns].fillna(data_df[numeric_columns].mean())
50
-
51
- # Fill NaN in non-numeric columns with 'Unknown'
52
- data_df[non_numeric_columns] = data_df[non_numeric_columns].fillna('Unknown')
53
-
54
- try:
55
- data_df = data_df.reset_index(drop=True)
56
- except:
57
- pass
58
-
59
- return data_df
60
-
61
-
62
- # remove indiserd columns after reading from the DB
63
- def process_data_after_reading(data, output, fill_strategy="mean", fit_encode=[], to_drop=[]):
64
- """
65
- Process data after reading from the database, including encoding, dropping columns, and creating a PyTorch TensorDataset.
66
-
67
- Args:
68
- data (pandas.DataFrame): Input data.
69
- output (str): Output column name.
70
- fill_strategy (str, optional): Imputation strategy for missing values. Default is "mean".
71
- fit_encode (list, optional): List of columns to be label-encoded. Default is an empty list.
72
- to_drop (list, optional): List of columns to be dropped from the DataFrame. Default is an empty list.
73
-
74
- Returns:
75
- torch.utils.data.TensorDataset: Processed data as a PyTorch TensorDataset.
76
- """
77
-
78
- # Check if there is a DataSet assigned to the node
79
- if (len(data) == 0):
80
- raise "Node doesn't Have dataSet"
81
-
82
- encoder = LabelEncoder()
83
- # En Code some columns
84
- for s in fit_encode:
85
- try:
86
- data[s] = encoder.fit_transform(data[s])
87
- except:
88
- raise print(s)
89
-
90
- # The output of the DATA
91
- y = data[output]
92
-
93
- X = data
94
-
95
-
96
- # remove indisered columns when reading the dataframe from the DB
97
- for column in to_drop:
98
- try:
99
- X = X.drop(
100
- [column], axis=1
101
- )
102
- except Exception as e:
103
- raise e
104
-
105
-
106
- # Get the DATAset Features
107
- features = [col for col in X.columns if col != output]
108
-
109
- # Impute missing values using the mean strategy
110
- try:
111
- imputer = SimpleImputer(strategy=fill_strategy)
112
- X[features] = imputer.fit_transform(X[features])
113
- except:
114
- print()
115
-
116
- X = torch.tensor(X.values, dtype=torch.float32)
117
- y = torch.tensor(y.values, dtype=torch.float32)
118
- data = TensorDataset(X, y)
119
-
120
- return data
121
-
122
-
123
- def get_nodeid_from_name(name):
124
- """
125
- Get the NodeId from the Nodes table based on the NodeName.
126
-
127
- Args:
128
- name (str): Node name.
129
-
130
- Returns:
131
- int or None: NodeId or None if not found.
132
- """
133
-
134
- NodeId = int(
135
- pd.read_sql(
136
- text(f"SELECT NodeId FROM Nodes WHERE NodeName = '{name}'"), my_eng
137
- ).iloc[0, 0]
138
- )
139
- return NodeId
140
-
141
-
142
- def get_netid_from_name(name):
143
- """
144
- Get the Network Id from the Networks table based on the NetName.
145
-
146
- Args:
147
- name (str): Network name.
148
-
149
- Returns:
150
- int or None: NetId or None if not found.
151
- """
152
- try:
153
- NetId = int(
154
- pd.read_sql(
155
- text(f"SELECT NetId FROM Networks WHERE NetName = '{name}'"),
156
- my_eng,
157
- ).iloc[0, 0]
158
- )
159
- except:
160
- NetId = None
161
- return NetId
162
-
163
-
164
- def get_flsetupid_from_name(name):
165
- """
166
- Get the FLsetupId from the FLsetup table based on the FL setup name.
167
-
168
- Args:
169
- name (str): FL setup name.
170
-
171
- Returns:
172
- int or None: FLsetupId or None if not found.
173
- """
174
- try:
175
- id = int(
176
- pd.read_sql(
177
- text(f"SELECT FLsetupId FROM FLsetup WHERE name = '{name}'"),
178
- my_eng,
179
- ).iloc[0, 0]
180
- )
181
- except:
182
- id = None
183
- return id
184
-
185
-
186
- def get_flpipeline_from_name(name):
187
- """
188
- Get the FLpipeline Id from the FLpipeline table based on the FL pipeline name.
189
-
190
- Args:
191
- name (str): FL pipeline name.
192
-
193
- Returns:
194
- int or None: FLpipelineId or None if not found.
195
- """
196
- try:
197
- id = int(
198
- pd.read_sql(
199
- text(f"SELECT id FROM FLpipeline WHERE name = '{name}'"),
200
- my_eng,
201
- ).iloc[0, 0]
202
- )
203
- except:
204
- id = None
205
- return id
206
-
207
-
208
- def get_feddataset_id_from_name(name):
209
- """
210
- Get the Federated dataset Id from the FedDatasets table based on the federated dataset name.
211
-
212
- Args:
213
- name (str): Federated dataset name.
214
-
215
- Returns:
216
- int or None: FedId or None if not found.
217
- """
218
- try:
219
- id = int(
220
- pd.read_sql(
221
- text(f"SELECT FedId FROM FedDatasets WHERE name = '{name}'"),
222
- my_eng,
223
- ).iloc[0, 0]
224
- )
225
- except:
226
- id = None
227
- return id
228
-
229
-
230
- def master_table_exists():
231
- """
232
- Check if the MasterDataset table exists in the database.
233
-
234
- Returns:
235
- bool: True if the table exists, False otherwise.
236
- """
237
-
238
- return pd.read_sql(
239
- text(
240
- " SELECT EXISTS ( SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = 'MasterDataset' )"
241
- ),
242
- my_eng,
243
- ).values[0][0]
1
+ from sklearn.preprocessing import LabelEncoder
2
+ from sklearn.impute import SimpleImputer
3
+
4
+ from sqlalchemy import text
5
+
6
+ import torch
7
+ import pandas as pd
8
+ from torch.utils.data import TensorDataset
9
+ import numpy as np
10
+
11
+ from Medfl.NetManager.database_connector import DatabaseManager
12
+
13
+
14
def is_str(data_df, row, x):
    """
    Return the value of column *x* in *row*, quoted if the column is textual.

    Args:
        data_df (pandas.DataFrame): DataFrame the row belongs to.
        row (pandas.Series): A single data row.
        x (str): Column name to read.

    Returns:
        str or float: A single-quoted string for 'object' columns,
        otherwise the raw value.
    """
    value = row[x]
    if data_df[x].dtype == "object":
        # Textual column: wrap in single quotes (e.g. for SQL literals).
        return f"'{value}'"
    return value
31
+
32
+
33
def process_eicu(data_df):
    """
    Process eICU data by imputing missing values.

    Numeric columns have NaNs replaced by the per-column mean; non-numeric
    columns have NaNs replaced by the string 'Unknown'.

    NOTE(review): filled columns are assigned back onto *data_df*, so the
    caller's DataFrame is mutated in place as well as returned.

    Args:
        data_df (pandas.DataFrame): Input data.

    Returns:
        pandas.DataFrame: Processed data with a fresh RangeIndex.
    """
    # Identify numeric and non-numeric columns.
    numeric_columns = data_df.select_dtypes(include=[np.number]).columns
    non_numeric_columns = data_df.select_dtypes(exclude=[np.number]).columns

    # Fill NaN in numeric columns with the column mean.
    data_df[numeric_columns] = data_df[numeric_columns].fillna(
        data_df[numeric_columns].mean())

    # Fill NaN in non-numeric columns with a sentinel category.
    data_df[non_numeric_columns] = data_df[non_numeric_columns].fillna(
        'Unknown')

    # reset_index(drop=True) does not raise for a valid DataFrame; the
    # original bare `except: pass` only hid programming errors, so it
    # was removed.
    return data_df.reset_index(drop=True)
61
+
62
+
63
+ # remove indiserd columns after reading from the DB
64
def process_data_after_reading(data, output, fill_strategy="mean", fit_encode=None, to_drop=None):
    """
    Prepare a DataFrame read from the database as a PyTorch TensorDataset.

    Optionally label-encodes selected columns, drops unwanted columns,
    imputes missing feature values, and packs the result into tensors.

    Args:
        data (pandas.DataFrame): Input data.
        output (str): Name of the target column.
        fill_strategy (str, optional): SimpleImputer strategy. Default "mean".
        fit_encode (list, optional): Columns to label-encode. Default None (none).
        to_drop (list, optional): Columns to drop before building tensors.
            Default None (none).

    Returns:
        torch.utils.data.TensorDataset: (features, target) tensor pairs.
        Note the target column is NOT removed from the feature tensor unless
        the caller also lists it in *to_drop* (preserved historical behaviour).

    Raises:
        ValueError: If *data* is empty or a column cannot be label-encoded.
    """
    # Use None sentinels instead of mutable default arguments, which are
    # shared across calls.
    fit_encode = fit_encode if fit_encode is not None else []
    to_drop = to_drop if to_drop is not None else []

    # A node must have a dataset assigned before processing.  (The original
    # `raise "..."` was a TypeError: strings are not exceptions.)
    if len(data) == 0:
        raise ValueError("Node doesn't have a dataset")

    # Label-encode the requested columns (encoder built only when needed).
    if fit_encode:
        encoder = LabelEncoder()
        for column in fit_encode:
            try:
                data[column] = encoder.fit_transform(data[column])
            except Exception as exc:
                # The original `raise print(s)` raised None; report the
                # offending column in a real exception instead.
                raise ValueError(f"Failed to label-encode column '{column}'") from exc

    # The target of the data.
    y = data[output]
    X = data

    # Remove undesired columns read from the DB; a missing column is a
    # caller error, so propagate it.
    for column in to_drop:
        X = X.drop([column], axis=1)

    # Feature columns used for imputation (target excluded).
    features = [col for col in X.columns if col != output]

    # Impute missing values; best-effort, as before, but report why it was
    # skipped instead of silently printing an empty line.
    try:
        imputer = SimpleImputer(strategy=fill_strategy)
        X[features] = imputer.fit_transform(X[features])
    except Exception as exc:
        print(f"Imputation skipped: {exc}")

    X = torch.tensor(X.values, dtype=torch.float32)
    y = torch.tensor(y.values, dtype=torch.float32)
    return TensorDataset(X, y)
120
+
121
+
122
def get_nodeid_from_name(name):
    """
    Get the NodeId from the Nodes table based on the NodeName.

    Args:
        name (str): Node name.

    Returns:
        int or None: NodeId, or None if no such node exists.
    """
    db_manager = DatabaseManager()
    db_manager.connect()
    my_eng = db_manager.get_connection()

    try:
        # Bound parameter instead of f-string interpolation (SQL injection).
        result = pd.read_sql(
            text("SELECT NodeId FROM Nodes WHERE NodeName = :name"),
            my_eng,
            params={"name": name},
        )
        return int(result.iloc[0, 0])
    except IndexError:
        # No matching row: honour the documented "None if not found"
        # contract (the original raised IndexError instead).
        return None
142
+
143
+
144
def get_netid_from_name(name):
    """
    Get the Network Id from the Networks table based on the NetName.

    Args:
        name (str): Network name.

    Returns:
        int or None: NetId, or None if not found.
    """
    db_manager = DatabaseManager()
    db_manager.connect()
    my_eng = db_manager.get_connection()

    try:
        # Bound parameter instead of f-string interpolation (SQL injection).
        result = pd.read_sql(
            text("SELECT NetId FROM Networks WHERE NetName = :name"),
            my_eng,
            params={"name": name},
        )
        NetId = int(result.iloc[0, 0])
    except Exception:
        # Narrowed from a bare `except:` (which also caught SystemExit /
        # KeyboardInterrupt); a missing row or query failure yields None.
        NetId = None
    return NetId
168
+
169
+
170
def get_flsetupid_from_name(name):
    """
    Get the FLsetupId from the FLsetup table based on the FL setup name.

    Args:
        name (str): FL setup name.

    Returns:
        int or None: FLsetupId, or None if not found.
    """
    db_manager = DatabaseManager()
    db_manager.connect()
    my_eng = db_manager.get_connection()

    try:
        # Bound parameter instead of f-string interpolation (SQL injection).
        result = pd.read_sql(
            text("SELECT FLsetupId FROM FLsetup WHERE name = :name"),
            my_eng,
            params={"name": name},
        )
        # Renamed from `id`, which shadowed the builtin.
        setup_id = int(result.iloc[0, 0])
    except Exception:
        # Narrowed from a bare `except:`; missing row / query failure -> None.
        setup_id = None
    return setup_id
194
+
195
+
196
def get_flpipeline_from_name(name):
    """
    Get the FLpipeline Id from the FLpipeline table based on the FL pipeline name.

    Args:
        name (str): FL pipeline name.

    Returns:
        int or None: FLpipeline id, or None if not found.
    """
    db_manager = DatabaseManager()
    db_manager.connect()
    my_eng = db_manager.get_connection()

    try:
        # Bound parameter instead of f-string interpolation (SQL injection).
        result = pd.read_sql(
            text("SELECT id FROM FLpipeline WHERE name = :name"),
            my_eng,
            params={"name": name},
        )
        # Renamed from `id`, which shadowed the builtin.
        pipeline_id = int(result.iloc[0, 0])
    except Exception:
        # Narrowed from a bare `except:`; missing row / query failure -> None.
        pipeline_id = None
    return pipeline_id
220
+
221
+
222
def get_feddataset_id_from_name(name):
    """
    Get the Federated dataset Id from the FedDatasets table based on the
    federated dataset name.

    Args:
        name (str): Federated dataset name.

    Returns:
        int or None: FedId, or None if not found.
    """
    db_manager = DatabaseManager()
    db_manager.connect()
    my_eng = db_manager.get_connection()

    try:
        # Bound parameter instead of f-string interpolation (SQL injection).
        result = pd.read_sql(
            text("SELECT FedId FROM FedDatasets WHERE name = :name"),
            my_eng,
            params={"name": name},
        )
        # Renamed from `id`, which shadowed the builtin.
        fed_id = int(result.iloc[0, 0])
    except Exception:
        # Narrowed from a bare `except:`; missing row / query failure -> None.
        fed_id = None
    return fed_id
246
+
247
+
248
def master_table_exists():
    """
    Check if the MasterDataset table exists in the database.

    NOTE(review): queries MySQL-style `information_schema.TABLES`; confirm
    the backend supports it before porting to another database.

    Returns:
        bool: True if the table exists, False otherwise.
    """
    db_manager = DatabaseManager()
    db_manager.connect()
    my_eng = db_manager.get_connection()

    # Cast to bool so the return type matches the documented contract
    # (the raw EXISTS query yields 0/1).
    return bool(
        pd.read_sql(
            text(
                " SELECT EXISTS ( SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = 'MasterDataset' )"
            ),
            my_eng,
        ).values[0][0]
    )