MEDfl 2.0.4.dev1__py3-none-any.whl → 2.0.4.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. MEDfl/rw/client.py +98 -29
  2. MEDfl/rw/model.py +28 -0
  3. MEDfl/rw/server.py +71 -18
  4. MEDfl/rw/strategy.py +72 -78
  5. {MEDfl-2.0.4.dev1.dist-info → MEDfl-2.0.4.dev3.dist-info}/METADATA +1 -1
  6. MEDfl-2.0.4.dev3.dist-info/RECORD +36 -0
  7. MEDfl/rw/rwConfig.py +0 -21
  8. MEDfl/rw/verbose_server.py +0 -21
  9. MEDfl-2.0.4.dev1.dist-info/RECORD +0 -62
  10. Medfl/LearningManager/__init__.py +0 -13
  11. Medfl/LearningManager/client.py +0 -150
  12. Medfl/LearningManager/dynamicModal.py +0 -287
  13. Medfl/LearningManager/federated_dataset.py +0 -60
  14. Medfl/LearningManager/flpipeline.py +0 -192
  15. Medfl/LearningManager/model.py +0 -223
  16. Medfl/LearningManager/params.yaml +0 -14
  17. Medfl/LearningManager/params_optimiser.py +0 -442
  18. Medfl/LearningManager/plot.py +0 -229
  19. Medfl/LearningManager/server.py +0 -181
  20. Medfl/LearningManager/strategy.py +0 -82
  21. Medfl/LearningManager/utils.py +0 -331
  22. Medfl/NetManager/__init__.py +0 -10
  23. Medfl/NetManager/database_connector.py +0 -43
  24. Medfl/NetManager/dataset.py +0 -92
  25. Medfl/NetManager/flsetup.py +0 -320
  26. Medfl/NetManager/net_helper.py +0 -254
  27. Medfl/NetManager/net_manager_queries.py +0 -142
  28. Medfl/NetManager/network.py +0 -194
  29. Medfl/NetManager/node.py +0 -184
  30. Medfl/__init__.py +0 -3
  31. Medfl/scripts/__init__.py +0 -2
  32. Medfl/scripts/base.py +0 -30
  33. Medfl/scripts/create_db.py +0 -126
  34. {MEDfl-2.0.4.dev1.dist-info → MEDfl-2.0.4.dev3.dist-info}/LICENSE +0 -0
  35. {MEDfl-2.0.4.dev1.dist-info → MEDfl-2.0.4.dev3.dist-info}/WHEEL +0 -0
  36. {MEDfl-2.0.4.dev1.dist-info → MEDfl-2.0.4.dev3.dist-info}/top_level.txt +0 -0
Medfl/NetManager/dataset.py
@@ -1,92 +0,0 @@
- import pandas as pd
- from sqlalchemy import text
-
- from .net_helper import *
- from .net_manager_queries import (DELETE_DATASET, INSERT_DATASET,
-                                   SELECT_ALL_DATASET_NAMES)
- from MEDfl.NetManager.database_connector import DatabaseManager
-
- class DataSet:
-     def __init__(self, name: str, path: str, engine=None):
-         """
-         Initialize a DataSet object.
-
-         :param name: The name of the dataset.
-         :type name: str
-         :param path: The file path of the dataset CSV file.
-         :type path: str
-         """
-         self.name = name
-         self.path = path
-         db_manager = DatabaseManager()
-         db_manager.connect()
-         self.engine = db_manager.get_connection()
-
-     def validate(self):
-         """
-         Validate name and path attributes.
-
-         :raises TypeError: If name or path is not a string.
-         """
-         if not isinstance(self.name, str):
-             raise TypeError("name argument must be a string")
-
-         if not isinstance(self.path, str):
-             raise TypeError("path argument must be a string")
-
-     def upload_dataset(self, NodeId=-1):
-         """
-         Upload the dataset to the database.
-
-         :param NodeId: The NodeId associated with the dataset.
-         :type NodeId: int
-
-         Notes:
-         - Assumes the file at self.path is a valid CSV file.
-         - The dataset is uploaded to the 'DataSets' table in the database.
-         """
-
-         data_df = pd.read_csv(self.path)
-         nodeId = NodeId
-         columns = data_df.columns.tolist()
-
-
-         data_df = process_eicu(data_df)
-         for index, row in data_df.iterrows():
-             query_1 = "INSERT INTO DataSets(DataSetName,nodeId," + "".join(
-                 f"{x}," for x in columns
-             )
-             query_2 = f" VALUES ('{self.name}',{nodeId}, " + "".join(
-                 f"{is_str(data_df, row, x)}," for x in columns
-             )
-             query = query_1[:-1] + ")" + query_2[:-1] + ")"
-
-             self.engine.execute(text(query))
-
-     def delete_dataset(self):
-         """
-         Delete the dataset from the database.
-
-         Notes:
-         - Assumes the dataset name is unique in the 'DataSets' table.
-         """
-         self.engine.execute(text(DELETE_DATASET), {"name": self.name})
-
-     def update_data(self):
-         """
-         Update the data in the dataset.
-
-         Not implemented yet.
-         """
-         pass
-
-     @staticmethod
-     def list_alldatasets(engine):
-         """
-         List all dataset names from the 'DataSets' table.
-
-         :returns: A DataFrame containing the names of all datasets in the 'DataSets' table.
-         :rtype: pd.DataFrame
-         """
-         res = pd.read_sql(text(SELECT_ALL_DATASET_NAMES), engine)
-         return res
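Note: the deleted `upload_dataset` above assembles one INSERT per row by interpolating column names and values into the SQL string (via `is_str`), which is fragile and injection-prone. As an illustrative sketch only, not part of the package: a parameterized equivalent, assuming `engine` is the SQLAlchemy connection returned by `DatabaseManager.get_connection()` and using a hypothetical helper name, could delegate the quoting to pandas:

# Hypothetical sketch, not MEDfl API: bulk-insert a CSV into the DataSets
# table using pandas' parameterized to_sql instead of hand-built SQL strings.
import pandas as pd

def upload_dataset_safely(name, path, engine, node_id=-1):
    data_df = pd.read_csv(path)
    data_df.insert(0, "DataSetName", name)  # tag every row with the dataset name
    data_df.insert(1, "NodeId", node_id)    # and the owning node
    # to_sql binds values as parameters, so quoting is handled by the driver
    data_df.to_sql("DataSets", engine, if_exists="append", index=False)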
Medfl/NetManager/flsetup.py
@@ -1,320 +0,0 @@
- from datetime import datetime
-
-
- from torch.utils.data import random_split, DataLoader, Dataset
-
- from MEDfl.LearningManager.federated_dataset import FederatedDataset
- from .net_helper import *
- from .net_manager_queries import *  # Import the sql_queries module
- from .network import Network
-
- from .node import Node
-
- from MEDfl.NetManager.database_connector import DatabaseManager
-
-
- class FLsetup:
-     def __init__(self, name: str, description: str, network: Network):
-         """Initialize a Federated Learning (FL) setup.
-
-         Args:
-             name (str): The name of the FL setup.
-             description (str): A description of the FL setup.
-             network (Network): An instance of the Network class representing the network architecture.
-         """
-         self.name = name
-         self.description = description
-         self.network = network
-         self.column_name = None
-         self.auto = 1 if self.column_name is not None else 0
-         self.validate()
-         self.fed_dataset = None
-
-         db_manager = DatabaseManager()
-         db_manager.connect()
-         self.eng = db_manager.get_connection()
-
-
-
-     def validate(self):
-         """Validate name, description, and network."""
-         if not isinstance(self.name, str):
-             raise TypeError("name argument must be a string")
-
-         if not isinstance(self.description, str):
-             raise TypeError("description argument must be a string")
-
-         if not isinstance(self.network, Network):
-             raise TypeError(
-                 "network argument must be a MEDfl.NetManager.Network "
-             )
-
-     def create(self):
-         """Create an FL setup."""
-         creation_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-         netid = get_netid_from_name(self.network.name)
-         self.eng.execute(
-             text(CREATE_FLSETUP_QUERY),
-             {
-                 "name": self.name,
-                 "description": self.description,
-                 "creation_date": creation_date,
-                 "net_id": netid,
-                 "column_name": self.column_name,
-             },
-         )
-         self.id = get_flsetupid_from_name(self.name)
-
-     def delete(self):
-         """Delete the FL setup."""
-         if self.fed_dataset is not None:
-             self.fed_dataset.delete_Flsetup(FLsetupId=self.id)
-         self.eng.execute(text(DELETE_FLSETUP_QUERY), {"name": self.name})
-
-     @classmethod
-     def read_setup(cls, FLsetupId: int):
-         """Read the FL setup by FLsetupId.
-
-         Args:
-             FLsetupId (int): The id of the FL setup to read.
-
-         Returns:
-             FLsetup: An instance of the FLsetup class with the specified FLsetupId.
-         """
-         db_manager = DatabaseManager()
-         db_manager.connect()
-         my_eng = db_manager.get_connection()
-
-         res = pd.read_sql(
-             text(READ_SETUP_QUERY), my_eng, params={"flsetup_id": FLsetupId}
-         ).iloc[0]
-
-         network_res = pd.read_sql(
-             text(READ_NETWORK_BY_ID_QUERY),
-             my_eng,
-             params={"net_id": int(res["NetId"])},
-         ).iloc[0]
-         network = Network(network_res["NetName"])
-         setattr(network, "id", res["NetId"])
-         fl_setup = cls(res["name"], res["description"], network)
-         if res["column_name"] == str(None):
-             res["column_name"] = None
-         setattr(fl_setup, "column_name", res["column_name"])
-         setattr(fl_setup, "id", res["FLsetupId"])
-
-         return fl_setup
-
-     @staticmethod
-     def list_allsetups():
-         """List all the FL setups.
-
-         Returns:
-             DataFrame: A DataFrame containing information about all the FL setups.
-         """
-         db_manager = DatabaseManager()
-         db_manager.connect()
-         my_eng = db_manager.get_connection()
-
-         Flsetups = pd.read_sql(text(READ_ALL_SETUPS_QUERY), my_eng)
-         return Flsetups
-
-     def create_nodes_from_master_dataset(self, params_dict: dict):
-         """Create nodes from the master dataset.
-
-         Args:
-             params_dict (dict): A dictionary containing parameters for node creation.
-                 - column_name (str): The name of the column in the MasterDataset used to create nodes.
-                 - train_nodes (list): A list of node names that will be used for training.
-                 - test_nodes (list): A list of node names that will be used for testing.
-
-         Returns:
-             list: A list of Node instances created from the master dataset.
-         """
-         assert "column_name" in params_dict.keys()
-         column_name, train_nodes, test_nodes = (
-             params_dict["column_name"],
-             params_dict["train_nodes"],
-             params_dict["test_nodes"],
-         )
-         self.column_name = column_name
-         self.auto = 1
-
-         # Update the column name of the auto FLsetup
-         query = f"UPDATE FLsetup SET column_name = '{column_name}' WHERE name = '{self.name}'"
-         self.eng.execute(text(query))
-
-
-         # Add Network to DB
-         # self.network.create_network()
-
-         netid = get_netid_from_name(self.network.name)
-
-         assert self.network.mtable_exists == 1
-         node_names = pd.read_sql(
-             text(READ_DISTINCT_NODES_QUERY.format(column_name)), self.eng
-         )
-
-         nodes = [Node(val[0], 1) for val in node_names.values.tolist()]
-
-         used_nodes = []
-
-         for node in nodes:
-             if node.name in train_nodes:
-                 node.train = 1
-                 node.create_node(netid)
-                 used_nodes.append(node)
-             if node.name in test_nodes:
-                 node.train = 0
-                 node.create_node(netid)
-                 used_nodes.append(node)
-         return used_nodes
-
-     def create_dataloader_from_node(
-         self,
-         node: Node,
-         output,
-         fill_strategy="mean", fit_encode=[], to_drop=[],
-         train_batch_size: int = 32,
-         test_batch_size: int = 1,
-         split_frac: float = 0.2,
-         dataset: Dataset = None,
-
-     ):
-         """Create DataLoader from a Node.
-
-         Args:
-             node (Node): The node from which to create DataLoader.
-             train_batch_size (int): The batch size for training data.
-             test_batch_size (int): The batch size for test data.
-             split_frac (float): The fraction of data to be used for testing.
-             dataset (Dataset): The dataset to use. If None, the method will read the dataset from the node.
-
-         Returns:
-             DataLoader: The DataLoader instances for training and testing.
-         """
-         if dataset is None:
-             if self.column_name is not None:
-                 dataset = process_data_after_reading(
-                     node.get_dataset(self.column_name), output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop
-                 )
-             else:
-                 dataset = process_data_after_reading(
-                     node.get_dataset(), output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop)
-
-         dataset_size = len(dataset)
-         traindata_size = int(dataset_size * (1 - split_frac))
-         traindata, testdata = random_split(
-             dataset, [traindata_size, dataset_size - traindata_size]
-         )
-         trainloader, testloader = DataLoader(
-             traindata, batch_size=train_batch_size
-         ), DataLoader(testdata, batch_size=test_batch_size)
-         return trainloader, testloader
-
-     def create_federated_dataset(
-         self, output, fill_strategy="mean", fit_encode=[], to_drop=[], val_frac=0.1, test_frac=0.2
-     ) -> FederatedDataset:
-         """Create a federated dataset.
-
-         Args:
-             output (string): the output feature of the dataset
-             val_frac (float): The fraction of data to be used for validation.
-             test_frac (float): The fraction of data to be used for testing.
-
-         Returns:
-             FederatedDataset: The FederatedDataset instance containing train, validation, and test data.
-         """
-
-         if not self.column_name:
-             to_drop.extend(["DataSetName", "NodeId", "DataSetId"])
-         else:
-             to_drop.extend(["PatientId"])
-
-         netid = self.network.id
-         train_nodes = pd.read_sql(
-             text(
-                 f"SELECT Nodes.NodeName FROM Nodes WHERE Nodes.NetId = {netid} AND Nodes.train = 1 "
-             ),
-             self.eng,
-         )
-         test_nodes = pd.read_sql(
-             text(
-                 f"SELECT Nodes.NodeName FROM Nodes WHERE Nodes.NetId = {netid} AND Nodes.train = 0 "
-             ),
-             self.eng,
-         )
-
-         train_nodes = [
-             Node(val[0], 1, test_frac) for val in train_nodes.values.tolist()
-         ]
-         test_nodes = [Node(val[0], 0) for val in test_nodes.values.tolist()]
-
-         trainloaders, valloaders, testloaders = [], [], []
-         # if len(test_nodes) == 0:
-         #     raise "test node empty"
-         if test_nodes is None:
-             _, testloader = self.create_dataloader_from_node(
-                 train_nodes[0], output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop)
-             testloaders.append(testloader)
-         else:
-             for train_node in train_nodes:
-                 train_valloader, testloader = self.create_dataloader_from_node(
-                     train_node, output, fill_strategy=fill_strategy,
-                     fit_encode=fit_encode, to_drop=to_drop,)
-                 trainloader, valloader = self.create_dataloader_from_node(
-                     train_node,
-                     output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop,
-                     test_batch_size=32,
-                     split_frac=val_frac,
-                     dataset=train_valloader.dataset,
-                 )
-                 trainloaders.append(trainloader)
-                 valloaders.append(valloader)
-                 testloaders.append(testloader)
-
-         for test_node in test_nodes:
-             _, testloader = self.create_dataloader_from_node(
-                 test_node, output, fill_strategy=fill_strategy, fit_encode=fit_encode, to_drop=to_drop, split_frac=1.0
-             )
-             testloaders.append(testloader)
-         train_nodes_names = [node.name for node in train_nodes]
-         test_nodes_names = train_nodes_names + [
-             node.name for node in test_nodes
-         ]
-
-         # test_nodes_names = [
-         #     node.name for node in test_nodes
-         # ]
-
-         # Add FLsetup to the database
-         # self.create()
-
-         # self.network.update_network(FLsetupId=self.id)
-         fed_dataset = FederatedDataset(
-             self.name + "_Feddataset",
-             train_nodes_names,
-             test_nodes_names,
-             trainloaders,
-             valloaders,
-             testloaders,
-         )
-         self.fed_dataset = fed_dataset
-         self.fed_dataset.create(self.id)
-         return self.fed_dataset
-
-
-
-
-     def get_flDataSet(self):
-         """
-         Retrieve the federated dataset associated with the FL setup using the FL setup's name.
-
-         Returns:
-             pandas.DataFrame: DataFrame containing the federated dataset information.
-         """
-         return pd.read_sql(
-             text(
-                 f"SELECT * FROM FedDatasets WHERE FLsetupId = {get_flsetupid_from_name(self.name)}"
-             ),
-             self.eng,
-         )
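Note: the removed `create_dataloader_from_node` holds out `split_frac` of the samples for testing (`traindata_size = int(dataset_size * (1 - split_frac))`) and wraps both halves in DataLoaders, with the old defaults `train_batch_size=32` and `test_batch_size=1`. A self-contained sketch of that split logic, using a synthetic `TensorDataset` as a stand-in for data read from a node:

# Standalone sketch of the split performed by create_dataloader_from_node;
# the synthetic dataset here is a stand-in for the node's real data.
import torch
from torch.utils.data import DataLoader, TensorDataset, random_split

dataset = TensorDataset(torch.randn(100, 5), torch.randint(0, 2, (100,)).float())
split_frac = 0.2                                # fraction held out for testing
n_train = int(len(dataset) * (1 - split_frac))  # same rounding as the old code
train_data, test_data = random_split(dataset, [n_train, len(dataset) - n_train])
trainloader = DataLoader(train_data, batch_size=32)  # old train default
testloader = DataLoader(test_data, batch_size=1)     # old test default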
Medfl/NetManager/net_helper.py
@@ -1,254 +0,0 @@
- from sklearn.preprocessing import LabelEncoder
- from sklearn.impute import SimpleImputer
-
- from sqlalchemy import text
-
- import torch
- import pandas as pd
- from torch.utils.data import TensorDataset
- import numpy as np
-
- from MEDfl.NetManager.database_connector import DatabaseManager
-
-
- def is_str(data_df, row, x):
-     """
-     Check if a column in a DataFrame is of type 'object' and convert the value accordingly.
-
-     Args:
-         data_df (pandas.DataFrame): DataFrame containing the data.
-         row (pandas.Series): Data row.
-         x (str): Column name.
-
-     Returns:
-         str or float: Processed value based on the column type.
-     """
-     if data_df[x].dtype == "object":
-         x = f"'{row[x]}'"
-     else:
-         x = row[x]
-     return x
-
-
- def process_eicu(data_df):
-     """
-     Process eICU data by filling missing values with mean and replacing NaNs with 'Unknown'.
-
-     Args:
-         data_df (pandas.DataFrame): Input data.
-
-     Returns:
-         pandas.DataFrame: Processed data.
-     """
-     # Identify numeric and non-numeric columns
-     numeric_columns = data_df.select_dtypes(include=[np.number]).columns
-     non_numeric_columns = data_df.select_dtypes(exclude=[np.number]).columns
-
-     # Fill NaN in numeric columns with mean
-     data_df[numeric_columns] = data_df[numeric_columns].fillna(
-         data_df[numeric_columns].mean())
-
-     # Fill NaN in non-numeric columns with 'Unknown'
-     data_df[non_numeric_columns] = data_df[non_numeric_columns].fillna(
-         'Unknown')
-
-     try:
-         data_df = data_df.reset_index(drop=True)
-     except Exception:
-         pass
-
-     return data_df
-
-
- # Remove undesired columns after reading from the DB
- def process_data_after_reading(data, output, fill_strategy="mean", fit_encode=[], to_drop=[]):
-     """
-     Process data after reading from the database, including encoding, dropping columns, and creating a PyTorch TensorDataset.
-
-     Args:
-         data (pandas.DataFrame): Input data.
-         output (str): Output column name.
-         fill_strategy (str, optional): Imputation strategy for missing values. Default is "mean".
-         fit_encode (list, optional): List of columns to be label-encoded. Default is an empty list.
-         to_drop (list, optional): List of columns to be dropped from the DataFrame. Default is an empty list.
-
-     Returns:
-         torch.utils.data.TensorDataset: Processed data as a PyTorch TensorDataset.
-     """
-
-     # Check if there is a DataSet assigned to the node
-     if len(data) == 0:
-         raise ValueError("Node doesn't have a dataset")
-
-     encoder = LabelEncoder()
-     # Encode some columns
-     for s in fit_encode:
-         try:
-             data[s] = encoder.fit_transform(data[s])
-         except Exception as e:
-             raise ValueError(f"Failed to encode column: {s}") from e
-
-     # The output of the data
-     y = data[output]
-
-     X = data
-
-     # Remove undesired columns when reading the dataframe from the DB
-     for column in to_drop:
-         try:
-             X = X.drop(
-                 [column], axis=1
-             )
-         except Exception as e:
-             raise e
-
-     # Get the dataset features
-     features = [col for col in X.columns if col != output]
-
-     # Impute missing values using the chosen strategy
-     try:
-         imputer = SimpleImputer(strategy=fill_strategy)
-         X[features] = imputer.fit_transform(X[features])
-     except Exception as e:
-         print(f"Imputation skipped: {e}")
-
-     X = torch.tensor(X.values, dtype=torch.float32)
-     y = torch.tensor(y.values, dtype=torch.float32)
-     data = TensorDataset(X, y)
-
-     return data
-
-
- def get_nodeid_from_name(name):
-     """
-     Get the NodeId from the Nodes table based on the NodeName.
-
-     Args:
-         name (str): Node name.
-
-     Returns:
-         int or None: NodeId or None if not found.
-     """
-     db_manager = DatabaseManager()
-     db_manager.connect()
-     my_eng = db_manager.get_connection()
-
-     result_proxy = my_eng.execute(f"SELECT NodeId FROM Nodes WHERE NodeName = '{name}'")
-     NodeId = int(result_proxy.fetchone()[0])
-     return NodeId
-
-
- def get_netid_from_name(name):
-     """
-     Get the Network Id from the Networks table based on the NetName.
-
-     Args:
-         name (str): Network name.
-
-     Returns:
-         int or None: NetId or None if not found.
-     """
-     db_manager = DatabaseManager()
-     db_manager.connect()
-     my_eng = db_manager.get_connection()
-
-     try:
-         result_proxy = my_eng.execute(f"SELECT NetId FROM Networks WHERE NetName = '{name}'")
-         NetId = int(result_proxy.fetchone()[0])
-     except Exception:
-         NetId = None
-     return NetId
-
-
- def get_flsetupid_from_name(name):
-     """
-     Get the FLsetupId from the FLsetup table based on the FL setup name.
-
-     Args:
-         name (str): FL setup name.
-
-     Returns:
-         int or None: FLsetupId or None if not found.
-     """
-     db_manager = DatabaseManager()
-     db_manager.connect()
-     my_eng = db_manager.get_connection()
-
-     try:
-
-         result_proxy = my_eng.execute(f"SELECT FLsetupId FROM FLsetup WHERE name = '{name}'")
-         id = int(result_proxy.fetchone()[0])
-
-     except Exception:
-         id = None
-     return id
-
-
- def get_flpipeline_from_name(name):
-     """
-     Get the FLpipeline Id from the FLpipeline table based on the FL pipeline name.
-
-     Args:
-         name (str): FL pipeline name.
-
-     Returns:
-         int or None: FLpipelineId or None if not found.
-     """
-     db_manager = DatabaseManager()
-     db_manager.connect()
-     my_eng = db_manager.get_connection()
-
-     try:
-
-         result_proxy = my_eng.execute(f"SELECT id FROM FLpipeline WHERE name = '{name}'")
-         id = int(result_proxy.fetchone()[0])
-     except Exception:
-         id = None
-     return id
-
-
- def get_feddataset_id_from_name(name):
-     """
-     Get the Federated dataset Id from the FedDatasets table based on the federated dataset name.
-
-     Args:
-         name (str): Federated dataset name.
-
-     Returns:
-         int or None: FedId or None if not found.
-     """
-     db_manager = DatabaseManager()
-     db_manager.connect()
-     my_eng = db_manager.get_connection()
-
-     try:
-
-         result_proxy = my_eng.execute(f"SELECT FedId FROM FedDatasets WHERE name = '{name}'")
-         id = int(result_proxy.fetchone()[0])
-     except Exception:
-         id = None
-     return id
-
-
- def master_table_exists():
-     """
-     Check if the MasterDataset table exists in the database.
-
-     Returns:
-         bool: True if the table exists, False otherwise.
-     """
-     try:
-         db_manager = DatabaseManager()
-         db_manager.connect()
-         my_eng = db_manager.get_connection()
-
-         # SQLite-specific query to check if table exists
-         sql_query = text("SELECT name FROM sqlite_master WHERE type='table' AND name='MasterDataset'")
-         result = my_eng.execute(sql_query)
-         exists = result.fetchone() is not None
-         return exists
-
-     except Exception as e:
-         print(f"Error checking MasterDataset table existence: {e}")
-         return False
-
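Note: each deleted `get_*_from_name` helper follows the same pattern: open a connection through `DatabaseManager`, run a name-based SELECT assembled with an f-string, and fall back to None on failure. A hedged sketch of one such lookup with a bound parameter instead of string interpolation (table and column names taken from the code above; assumes the SQLAlchemy 1.x-style `execute()` the old code relied on):

# Sketch only, not the package's implementation: parameterized variant of
# get_netid_from_name using a bound :name parameter.
from sqlalchemy import text
from MEDfl.NetManager.database_connector import DatabaseManager

def get_netid_from_name(name):
    db_manager = DatabaseManager()
    db_manager.connect()
    my_eng = db_manager.get_connection()
    row = my_eng.execute(
        text("SELECT NetId FROM Networks WHERE NetName = :name"),
        {"name": name},
    ).fetchone()
    return int(row[0]) if row is not None else None  # None when no match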