MEDfl 0.1.31__py3-none-any.whl → 0.1.33__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. {MEDfl-0.1.31.dist-info → MEDfl-0.1.33.dist-info}/METADATA +127 -128
  2. MEDfl-0.1.33.dist-info/RECORD +34 -0
  3. {MEDfl-0.1.31.dist-info → MEDfl-0.1.33.dist-info}/WHEEL +1 -1
  4. {MEDfl-0.1.31.dist-info → MEDfl-0.1.33.dist-info}/top_level.txt +0 -1
  5. Medfl/LearningManager/__init__.py +13 -13
  6. Medfl/LearningManager/client.py +150 -150
  7. Medfl/LearningManager/dynamicModal.py +287 -287
  8. Medfl/LearningManager/federated_dataset.py +60 -60
  9. Medfl/LearningManager/flpipeline.py +192 -192
  10. Medfl/LearningManager/model.py +223 -223
  11. Medfl/LearningManager/params.yaml +14 -14
  12. Medfl/LearningManager/params_optimiser.py +442 -442
  13. Medfl/LearningManager/plot.py +229 -229
  14. Medfl/LearningManager/server.py +181 -181
  15. Medfl/LearningManager/strategy.py +82 -82
  16. Medfl/LearningManager/utils.py +331 -308
  17. Medfl/NetManager/__init__.py +10 -9
  18. Medfl/NetManager/database_connector.py +43 -48
  19. Medfl/NetManager/dataset.py +92 -92
  20. Medfl/NetManager/flsetup.py +320 -320
  21. Medfl/NetManager/net_helper.py +254 -248
  22. Medfl/NetManager/net_manager_queries.py +142 -137
  23. Medfl/NetManager/network.py +194 -174
  24. Medfl/NetManager/node.py +184 -178
  25. Medfl/__init__.py +3 -2
  26. Medfl/scripts/__init__.py +2 -0
  27. Medfl/scripts/base.py +30 -0
  28. Medfl/scripts/create_db.py +126 -0
  29. alembic/env.py +61 -61
  30. scripts/base.py +29 -29
  31. scripts/config.ini +5 -5
  32. scripts/create_db.py +133 -133
  33. MEDfl/LearningManager/__init__.py +0 -13
  34. MEDfl/LearningManager/client.py +0 -150
  35. MEDfl/LearningManager/dynamicModal.py +0 -287
  36. MEDfl/LearningManager/federated_dataset.py +0 -60
  37. MEDfl/LearningManager/flpipeline.py +0 -192
  38. MEDfl/LearningManager/model.py +0 -223
  39. MEDfl/LearningManager/params.yaml +0 -14
  40. MEDfl/LearningManager/params_optimiser.py +0 -442
  41. MEDfl/LearningManager/plot.py +0 -229
  42. MEDfl/LearningManager/server.py +0 -181
  43. MEDfl/LearningManager/strategy.py +0 -82
  44. MEDfl/LearningManager/utils.py +0 -333
  45. MEDfl/NetManager/__init__.py +0 -9
  46. MEDfl/NetManager/database_connector.py +0 -48
  47. MEDfl/NetManager/dataset.py +0 -92
  48. MEDfl/NetManager/flsetup.py +0 -320
  49. MEDfl/NetManager/net_helper.py +0 -248
  50. MEDfl/NetManager/net_manager_queries.py +0 -137
  51. MEDfl/NetManager/network.py +0 -174
  52. MEDfl/NetManager/node.py +0 -178
  53. MEDfl/__init__.py +0 -2
  54. MEDfl-0.1.31.data/scripts/setup_mysql.sh +0 -22
  55. MEDfl-0.1.31.dist-info/RECORD +0 -54
  56. scripts/db_config.ini +0 -6
Medfl/NetManager/node.py CHANGED
@@ -1,178 +1,184 @@
- import pandas as pd
-
- from .net_helper import *
- from .net_manager_queries import *
- from MEDfl.LearningManager.utils import params
- from MEDfl.NetManager.database_connector import DatabaseManager
-
- class Node:
- """
- A class representing a node in the network.
-
- Attributes:
- name (str): The name of the node.
- train (int): An integer flag representing whether the node is used for training (1) or testing (0).
- test_fraction (float, optional): The fraction of data used for testing when train=1. Default is 0.2.
- """
-
- def __init__(
- self, name: str, train: int, test_fraction: float = 0.2, engine=None
- ):
- """
- Initialize a Node instance.
-
- Parameters:
- name (str): The name of the node.
- train (int): An integer flag representing whether the node is used for training (1) or testing (0).
- test_fraction (float, optional): The fraction of data used for testing when train=1. Default is 0.2.
- """
- self.name = name
- self.train = train
- self.test_fraction = 1.0 if self.train == 0 else test_fraction
-
-
- db_manager = DatabaseManager() ;
- db_manager.connect() ;
- self.engine = db_manager.get_connection()
-
- def validate(self):
- """Validate name, train, test_fraction"""
- if not isinstance(self.name, str):
- raise TypeError("name argument must be a string")
-
- if not isinstance(self.train, int):
- raise TypeError("train argument must be an int")
-
- if not isinstance(self.test_fraction, float):
- raise TypeError("test_fraction argument must be a float")
-
- def create_node(self, NetId: int):
- """Create a node in the database.
- Parameters:
- NetId (int): The ID of the network to which the node belongs.
-
- Returns:
- None
- """
- self.engine.execute(
- text(INSERT_NODE_QUERY.format(self.name, NetId, self.train))
- )
-
- def delete_node(self):
- """Delete the node from the database."""
- self.engine.execute(text(DELETE_NODE_QUERY.format(self.name)))
-
- def check_dataset_compatibility(self, data_df):
- """Check if the dataset is compatible with the master dataset.
- Parameters:
- data_df (DataFrame): The dataset to check.
-
- Returns:
- None
- """
- if master_table_exists() != 1:
- print("MasterDataset doesn't exist")
- else:
- columns = data_df.columns.tolist()
-
- # get master_dataset columns
- result_proxy = self.engine.execute(SELECT_MASTER_COLUMNS_QUERY)
- master_table_columns = result_proxy.keys()
-
-
- assert [x == y for x, y in zip(master_table_columns, columns)]
-
- def update_node(self):
- """Update the node information (not implemented)."""
- pass
-
- def get_dataset(self, column_name: str = None):
- """Get the dataset for the node based on the given column name.
- Parameters:
- column_name (str, optional): The column name to filter the dataset. Default is None.
-
- Returns:
- DataFrame: The dataset associated with the node.
- """
- NodeId = get_nodeid_from_name(self.name)
- if column_name is not None:
- query = text(SELECT_DATASET_BY_COLUMN_QUERY.format(column_name, self.name))
- else:
- query = text(SELECT_DATASET_BY_NODE_ID_QUERY.format(NodeId))
-
- result_proxy = self.engine.execute(query)
- node_dataset = pd.DataFrame(result_proxy.fetchall(), columns=result_proxy.keys())
-
- return node_dataset
-
- def upload_dataset(self, dataset_name: str, path_to_csv: str = params['path_to_test_csv']):
- """Upload the dataset to the database for the node.
- Parameters:
- dataset_name (str): The name of the dataset.
- path_to_csv (str, optional): Path to the CSV file containing the dataset. Default is the path in params.
-
- Returns:
- None
- """
- data_df = pd.read_csv(path_to_csv)
-
- nodeId = get_nodeid_from_name(self.name)
- columns = data_df.columns.tolist()
- self.check_dataset_compatibility(data_df)
-
- data_df = process_eicu(data_df)
- for index, row in data_df.iterrows():
- query_1 = "INSERT INTO DataSets(DataSetName,nodeId," + "".join(
- f"{x}," for x in columns
- )
- query_2 = f" VALUES ('{dataset_name}',{nodeId}, " + "".join(
- f"{is_str(data_df, row, x)}," for x in columns
- )
- query = query_1[:-1] + ")" + query_2[:-1] + ")"
- self.engine.execute(text(query))
-
- def assign_dataset(self, dataset_name:str):
- """Assigning existing dataSet to node
- Parameters:
- dataset_name (str): The name of the dataset to assign.
-
- Returns:
- None
- """
-
- nodeId = get_nodeid_from_name(self.name)
- query = f"UPDATE DataSets SET nodeId = {nodeId} WHERE DataSetName = '{dataset_name}'"
- self.engine.execute(text(query))
-
- def unassign_dataset(self, dataset_name:str):
- """unssigning existing dataSet to node
- Parameters:
- dataset_name (str): The name of the dataset to assign.
-
- Returns:
- None
- """
-
- query = f"UPDATE DataSets SET nodeId = {-1} WHERE DataSetName = '{dataset_name}'"
- self.engine.execute(text(query))
-
- def list_alldatasets(self):
- """List all datasets associated with the node.
- Returns:
- DataFrame: A DataFrame containing information about all datasets associated with the node.
-
- """
- return pd.read_sql(
- text(SELECT_ALL_DATASETS_QUERY.format(self.name)), my_eng
- )
-
- @staticmethod
- def list_allnodes():
- """List all nodes in the database.
- Returns:
- DataFrame: A DataFrame containing information about all nodes in the database.
-
- """
- query = text(SELECT_ALL_NODES_QUERY)
- res = pd.read_sql(query, my_eng)
- return res
+ import pandas as pd
+
+ from .net_helper import *
+ from .net_manager_queries import *
+ from MEDfl.LearningManager.utils import params
+ from MEDfl.NetManager.database_connector import DatabaseManager
+
+ from sqlalchemy import text, exc
+
+
+ class Node:
+ """
+ A class representing a node in the network.
+
+ Attributes:
+ name (str): The name of the node.
+ train (int): An integer flag representing whether the node is used for training (1) or testing (0).
+ test_fraction (float, optional): The fraction of data used for testing when train=1. Default is 0.2.
+ """
+
+ def __init__(
+ self, name: str, train: int, test_fraction: float = 0.2, engine=None
+ ):
+ """
+ Initialize a Node instance.
+
+ Parameters:
+ name (str): The name of the node.
+ train (int): An integer flag representing whether the node is used for training (1) or testing (0).
+ test_fraction (float, optional): The fraction of data used for testing when train=1. Default is 0.2.
+ """
+ self.name = name
+ self.train = train
+ self.test_fraction = 1.0 if self.train == 0 else test_fraction
+
+
+ db_manager = DatabaseManager() ;
+ db_manager.connect() ;
+ self.engine = db_manager.get_connection()
+
+ def validate(self):
+ """Validate name, train, test_fraction"""
+ if not isinstance(self.name, str):
+ raise TypeError("name argument must be a string")
+
+ if not isinstance(self.train, int):
+ raise TypeError("train argument must be an int")
+
+ if not isinstance(self.test_fraction, float):
+ raise TypeError("test_fraction argument must be a float")
+
+ def create_node(self, NetId: int):
+ """Create a node in the database.
+ Parameters:
+ NetId (int): The ID of the network to which the node belongs.
+
+ Returns:
+ None
+ """
+ self.engine.execute(
+ text(INSERT_NODE_QUERY.format(self.name, NetId, self.train))
+ )
+
+ def delete_node(self):
+ """Delete the node from the database."""
+ self.engine.execute(text(DELETE_NODE_QUERY.format(self.name)))
+
+ def check_dataset_compatibility(self, data_df):
+ """Check if the dataset is compatible with the master dataset.
+ Parameters:
+ data_df (DataFrame): The dataset to check.
+
+ Returns:
+ None
+ """
+ if master_table_exists() != 1:
+ print("MasterDataset doesn't exist")
+ else:
+ columns = data_df.columns.tolist()
+
+ # get master_dataset columns
+ result_proxy = self.engine.execute(SELECT_MASTER_COLUMNS_QUERY)
+ master_table_columns = result_proxy.keys()
+
+
+ assert [x == y for x, y in zip(master_table_columns, columns)]
+
+ def update_node(self):
+ """Update the node information (not implemented)."""
+ pass
+
+ def get_dataset(self, column_name: str = None):
+ """Get the dataset for the node based on the given column name.
+ Parameters:
+ column_name (str, optional): The column name to filter the dataset. Default is None.
+
+ Returns:
+ DataFrame: The dataset associated with the node.
+ """
+ NodeId = get_nodeid_from_name(self.name)
+ if column_name is not None:
+ query = text(SELECT_DATASET_BY_COLUMN_QUERY.format(column_name, self.name))
+ else:
+ query = text(SELECT_DATASET_BY_NODE_ID_QUERY.format(NodeId))
+
+ result_proxy = self.engine.execute(query)
+ node_dataset = pd.DataFrame(result_proxy.fetchall(), columns=result_proxy.keys())
+
+ return node_dataset
+
+ def upload_dataset(self, dataset_name: str, path_to_csv: str = params['path_to_test_csv']):
+ """Upload the dataset to the database for the node.
+
+ Parameters:
+ dataset_name (str): The name of the dataset.
+ path_to_csv (str, optional): Path to the CSV file containing the dataset. Default is the path in params.
+
+ Returns:
+ None
+ """
+ try:
+ data_df = pd.read_csv(path_to_csv)
+ nodeId = get_nodeid_from_name(self.name)
+ columns = data_df.columns.tolist()
+ self.check_dataset_compatibility(data_df)
+
+ data_df = process_eicu(data_df)
+
+ # Insert data in batches
+ batch_size = 1000 # Adjust as needed
+ for start_idx in range(0, len(data_df), batch_size):
+ batch_data = data_df.iloc[start_idx:start_idx + batch_size]
+ insert_query = f"INSERT INTO Datasets (DataSetName, NodeId, {', '.join(columns)}) VALUES (:dataset_name, :nodeId, {', '.join([':' + col for col in columns])})"
+ data_to_insert = batch_data.to_dict(orient='records')
+ params = [{"dataset_name": dataset_name, "nodeId": nodeId, **row} for row in data_to_insert]
+ self.engine.execute(text(insert_query), params)
+ except exc.SQLAlchemyError as e:
+ print(f"Error uploading dataset: {e}")
+
+ def assign_dataset(self, dataset_name:str):
+ """Assigning existing dataSet to node
+ Parameters:
+ dataset_name (str): The name of the dataset to assign.
+
+ Returns:
+ None
+ """
+
+ nodeId = get_nodeid_from_name(self.name)
+ query = f"UPDATE DataSets SET nodeId = {nodeId} WHERE DataSetName = '{dataset_name}'"
+ self.engine.execute(text(query))
+
+ def unassign_dataset(self, dataset_name:str):
+ """unssigning existing dataSet to node
+ Parameters:
+ dataset_name (str): The name of the dataset to assign.
+
+ Returns:
+ None
+ """
+
+ query = f"UPDATE DataSets SET nodeId = {-1} WHERE DataSetName = '{dataset_name}'"
+ self.engine.execute(text(query))
+
+ def list_alldatasets(self):
+ """List all datasets associated with the node.
+ Returns:
+ DataFrame: A DataFrame containing information about all datasets associated with the node.
+
+ """
+ return pd.read_sql(
+ text(SELECT_ALL_DATASETS_QUERY.format(self.name)), my_eng
+ )
+
+ @staticmethod
+ def list_allnodes():
+ """List all nodes in the database.
+ Returns:
+ DataFrame: A DataFrame containing information about all nodes in the database.
+
+ """
+ query = text(SELECT_ALL_NODES_QUERY)
+ res = pd.read_sql(query, my_eng)
+ return res
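
Note: the functional change in this file is in upload_dataset, which in 0.1.33 wraps the upload in a try/except on sqlalchemy.exc.SQLAlchemyError and inserts rows in batches with named bind parameters instead of building one string-formatted INSERT per row. The snippet below is a minimal, standalone sketch of that batched parameterized-insert pattern only; it assumes an in-memory SQLite engine, a made-up DataSets table with demo columns, and the SQLAlchemy 2.x connection API rather than MEDfl's real schema, helpers, or the legacy engine.execute() call used in the package.

import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///:memory:")

# object dtype keeps plain Python ints/strings, which the sqlite3 driver accepts
data_df = pd.DataFrame({"age": [63, 71, 58], "sex": ["F", "M", "F"]}, dtype=object)
columns = data_df.columns.tolist()

with engine.begin() as conn:
    conn.execute(text(
        "CREATE TABLE DataSets (DataSetName TEXT, NodeId INTEGER, age INTEGER, sex TEXT)"
    ))

    insert_query = (
        f"INSERT INTO DataSets (DataSetName, NodeId, {', '.join(columns)}) "
        f"VALUES (:dataset_name, :nodeId, {', '.join(':' + c for c in columns)})"
    )

    batch_size = 2  # 0.1.33 uses 1000
    for start in range(0, len(data_df), batch_size):
        batch = data_df.iloc[start:start + batch_size].to_dict(orient="records")
        rows = [{"dataset_name": "demo", "nodeId": 1, **row} for row in batch]
        # a list of parameter dicts makes SQLAlchemy run an executemany-style insert
        conn.execute(text(insert_query), rows)

    print(conn.execute(text("SELECT COUNT(*) FROM DataSets")).scalar())  # -> 3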
Medfl/__init__.py CHANGED
@@ -1,2 +1,3 @@
- from .LearningManager import *
- from .NetManager import *
+ from .LearningManager import *
+ from .NetManager import *
+ from .scripts import *
Medfl/scripts/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .base import *
+ from .create_db import *
Medfl/scripts/base.py ADDED
@@ -0,0 +1,30 @@
+ import mysql.connector
+ from sqlalchemy import create_engine, text
+ from configparser import ConfigParser
+ import yaml
+ import pkg_resources
+ import os
+
+ # Get the directory of the current script
+ current_directory = os.path.dirname(os.path.abspath(__file__))
+
+ # Load configuration from the config file
+ config_file_path = os.path.join(current_directory, 'db_config.ini')
+
+ config = ConfigParser()
+ config.read(config_file_path)
+ mysql_config = config['mysql']
+
+
+
+ connection_string = (
+ f"mysql+mysqlconnector://{mysql_config['user']}:{mysql_config['password']}@"
+ f"{mysql_config['host']}:{mysql_config['port']}/{mysql_config['database']}"
+ )
+
+ eng = create_engine(
+ connection_string,
+ execution_options={"autocommit": True},
+ )
+
+ my_eng = eng.connect()
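
Note: base.py builds its SQLAlchemy engine from a [mysql] section in a db_config.ini file placed next to the script. As a hedged illustration, the sketch below uses the keys base.py actually reads (host, port, user, password, database) and shows how they end up in the connection string; the values are placeholders, not defaults shipped with the package.

from configparser import ConfigParser

# placeholder db_config.ini contents (assumed values, same keys as base.py reads)
example_ini = """
[mysql]
host = localhost
port = 3306
user = medfl_user
password = change-me
database = MEDfl
"""

config = ConfigParser()
config.read_string(example_ini)
mysql_config = config["mysql"]

connection_string = (
    f"mysql+mysqlconnector://{mysql_config['user']}:{mysql_config['password']}@"
    f"{mysql_config['host']}:{mysql_config['port']}/{mysql_config['database']}"
)
print(connection_string)
# -> mysql+mysqlconnector://medfl_user:change-me@localhost:3306/MEDfl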
Medfl/scripts/create_db.py ADDED
@@ -0,0 +1,126 @@
+ import sys
+ import sqlite3
+ import pandas as pd
+ from configparser import ConfigParser
+ import os
+ import ast
+
+ from MEDfl.LearningManager.utils import *
+
+
+ def main(csv_file_path):
+ try:
+ # Get the directory of the current script
+ current_directory = os.path.dirname(os.path.abspath(__file__))
+
+ # Load configuration from the config file
+ # config_file_path = os.path.join(current_directory, 'sqllite_config.ini')*
+
+ config_file_path = load_db_config()
+
+ # config = ConfigParser()
+ # config.read(config_file_path)
+ # sqlite_config = config['sqllite']
+
+ sqlite_config = config_file_path
+
+
+ print('Im here !')
+
+ # Connect to SQLite database (it will be created if it doesn't exist)
+ database_path = sqlite_config['database']
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+ # Drop each table if it exists
+ tables = ['Networks', 'FLsetup', 'Nodes', 'DataSets', 'FLpipeline', 'testResults', 'FedDatasets']
+ for table in tables:
+ cursor.execute(f"DROP TABLE IF EXISTS {table}")
+
+ # Create Networks table
+ cursor.execute(
+ "CREATE TABLE Networks( \
+ NetId INTEGER PRIMARY KEY AUTOINCREMENT, \
+ NetName TEXT \
+ );"
+ )
+
+ # Create FLsetup table
+ cursor.execute("CREATE TABLE FLsetup (\
+ FLsetupId INTEGER PRIMARY KEY AUTOINCREMENT,\
+ name TEXT NOT NULL, \
+ description TEXT NOT NULL,\
+ creation_date TEXT NOT NULL,\
+ NetId INTEGER NOT NULL,\
+ column_name TEXT\
+ )")
+
+ # Create Nodes table
+ cursor.execute("CREATE TABLE Nodes ( \
+ NodeId INTEGER PRIMARY KEY AUTOINCREMENT,\
+ NodeName TEXT,\
+ train BOOLEAN DEFAULT 1,\
+ NetId INTEGER\
+ )")
+
+ data_df = pd.read_csv(csv_file_path)
+ columns = data_df.columns.tolist()
+ column_map = {"object": "TEXT", "int64": "INTEGER", "float64": "REAL"}
+ sub_query = ", ".join(f"{col} {column_map[str(data_df[col].dtype)]}" for col in columns)
+
+ # Create Datasets table by getting columns from the master csv file
+ cursor.execute(
+ f"CREATE TABLE DataSets( \
+ DataSetId INTEGER PRIMARY KEY AUTOINCREMENT, \
+ DataSetName TEXT, \
+ NodeId INTEGER,\
+ {sub_query}\
+ )"
+ )
+
+ # Create FLpipeline table
+ cursor.execute("CREATE TABLE FLpipeline(\
+ id INTEGER PRIMARY KEY AUTOINCREMENT,\
+ name TEXT NOT NULL, \
+ description TEXT NOT NULL,\
+ creation_date TEXT NOT NULL,\
+ results TEXT NOT NULL\
+ ) ")
+
+ # Create test results table
+ cursor.execute("CREATE TABLE testResults(\
+ pipelineId INTEGER,\
+ nodename TEXT NOT NULL, \
+ confusionmatrix TEXT,\
+ accuracy REAL,\
+ sensivity REAL,\
+ ppv REAL,\
+ npv REAL,\
+ f1score REAL,\
+ fpr REAL,\
+ tpr REAL, \
+ PRIMARY KEY (pipelineId, nodename)\
+ ) ")
+
+ # Create FederatedDataset table
+ cursor.execute("CREATE TABLE FedDatasets (\
+ FedId INTEGER PRIMARY KEY AUTOINCREMENT,\
+ FLsetupId INTEGER,\
+ FLpipeId INTEGER,\
+ name TEXT NOT NULL\
+ )")
+
+ # Commit and close the cursor
+ conn.commit()
+ cursor.close()
+ conn.close()
+
+ except sqlite3.Error as e:
+ print(f"Error: {e}")
+
+ if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print("Usage: python script.py <path_to_csv_file>")
+ sys.exit(1)
+ csv_file_path = sys.argv[1]
+ main(csv_file_path)
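
Note: create_db.py derives the DataSets table schema from the master CSV by mapping pandas dtypes to SQLite column types (object -> TEXT, int64 -> INTEGER, float64 -> REAL). A small standalone sketch of that mapping is below, with made-up CSV columns; per the script's usage message, the script itself is then run as python create_db.py <path_to_csv_file>.

import io

import pandas as pd

# made-up master CSV; only the dtype-to-column-type mapping is being illustrated
csv_text = "PatientId,age,bmi,gender\n1,63,27.4,F\n2,71,31.0,M\n"
data_df = pd.read_csv(io.StringIO(csv_text))

column_map = {"object": "TEXT", "int64": "INTEGER", "float64": "REAL"}
sub_query = ", ".join(
    f"{col} {column_map[str(data_df[col].dtype)]}" for col in data_df.columns
)
print(sub_query)
# -> PatientId INTEGER, age INTEGER, bmi REAL, gender TEXT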
alembic/env.py CHANGED
@@ -1,61 +1,61 @@
- from logging.config import fileConfig
- import logging
- from sqlalchemy import engine_from_config, create_engine
- from sqlalchemy import pool
- import sys
- import os
- from alembic import context
-
- sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- from scripts.base import my_eng
-
- # this is the Alembic Config object, which provides
- # access to the values within the .ini file in use.
- config = context.config
-
- # Interpret the config file for Python logging.
- # This line sets up loggers basically.
- fileConfig(config.config_file_name)
-
- # add your model's MetaData object here
- # for 'autogenerate' support
- # from myapp import mymodel
- # target_metadata = mymodel.Base.metadata
- target_metadata = None
-
- # other values from the config, defined by the needs of env.py,
- # can be acquired:
- # my_important_option = config.get_main_option("my_important_option")
- # ... etc.
- def configure_logger(name):
- # This is the standard logging configuration
- logging.config.fileConfig(
- 'alembic_logging.ini', # Path to your logging configuration file
- defaults={
- 'logfilename': 'alembic.log', # Log file name
- },
- disable_existing_loggers=False,
- )
-
- return logging.getLogger(name)
-
-
-
- def run_migrations_offline():
- """Run migrations in 'offline' mode."""
- pass
-
- def run_migrations_online():
- """Run migrations in 'online' mode."""
- pass
-
- if context.is_offline_mode():
- run_migrations_offline()
- else:
- run_migrations_online()
-
-
- if context.is_offline_mode():
- run_migrations_offline()
- else:
- run_migrations_online()
+ from logging.config import fileConfig
+ import logging
+ from sqlalchemy import engine_from_config, create_engine
+ from sqlalchemy import pool
+ import sys
+ import os
+ from alembic import context
+
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+ from scripts.base import my_eng
+
+ # this is the Alembic Config object, which provides
+ # access to the values within the .ini file in use.
+ config = context.config
+
+ # Interpret the config file for Python logging.
+ # This line sets up loggers basically.
+ fileConfig(config.config_file_name)
+
+ # add your model's MetaData object here
+ # for 'autogenerate' support
+ # from myapp import mymodel
+ # target_metadata = mymodel.Base.metadata
+ target_metadata = None
+
+ # other values from the config, defined by the needs of env.py,
+ # can be acquired:
+ # my_important_option = config.get_main_option("my_important_option")
+ # ... etc.
+ def configure_logger(name):
+ # This is the standard logging configuration
+ logging.config.fileConfig(
+ 'alembic_logging.ini', # Path to your logging configuration file
+ defaults={
+ 'logfilename': 'alembic.log', # Log file name
+ },
+ disable_existing_loggers=False,
+ )
+
+ return logging.getLogger(name)
+
+
+
+ def run_migrations_offline():
+ """Run migrations in 'offline' mode."""
+ pass
+
+ def run_migrations_online():
+ """Run migrations in 'online' mode."""
+ pass
+
+ if context.is_offline_mode():
+ run_migrations_offline()
+ else:
+ run_migrations_online()
+
+
+ if context.is_offline_mode():
+ run_migrations_offline()
+ else:
+ run_migrations_online()
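
Note: in both versions shown here, run_migrations_offline() and run_migrations_online() are defined as pass. For reference only, the sketch below is roughly the run_migrations_online() body from Alembic's default env.py template, not MEDfl code; it assumes the config, context, and target_metadata objects defined earlier in this env.py and a sqlalchemy.url entry in alembic.ini.

def run_migrations_online():
    # build an engine from the [alembic] section of alembic.ini
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    with connectable.connect() as connection:
        context.configure(connection=connection, target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()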