ecodev-core 0.0.1 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ecodev-core might be problematic.
- ecodev_core/__init__.py +79 -0
- ecodev_core/app_activity.py +108 -0
- ecodev_core/app_rights.py +24 -0
- ecodev_core/app_user.py +92 -0
- ecodev_core/auth_configuration.py +22 -0
- ecodev_core/authentication.py +226 -0
- ecodev_core/check_dependencies.py +179 -0
- ecodev_core/custom_equal.py +27 -0
- ecodev_core/db_connection.py +68 -0
- ecodev_core/db_filters.py +142 -0
- ecodev_core/db_insertion.py +108 -0
- ecodev_core/db_retrieval.py +194 -0
- ecodev_core/enum_utils.py +21 -0
- ecodev_core/list_utils.py +65 -0
- ecodev_core/logger.py +106 -0
- ecodev_core/pandas_utils.py +30 -0
- ecodev_core/permissions.py +15 -0
- ecodev_core/pydantic_utils.py +52 -0
- ecodev_core/read_write.py +40 -0
- ecodev_core/safe_utils.py +197 -0
- ecodev_core-0.0.1.dist-info/LICENSE.md +11 -0
- ecodev_core-0.0.1.dist-info/METADATA +72 -0
- ecodev_core-0.0.1.dist-info/RECORD +24 -0
- ecodev_core-0.0.1.dist-info/WHEEL +4 -0
ecodev_core/check_dependencies.py
@@ -0,0 +1,179 @@
+"""
+Module computing and checking high level dependencies in the code (based on pydeps)
+"""
+from pathlib import Path
+from subprocess import run
+from typing import Dict
+from typing import Iterator
+from typing import List
+
+from ecodev_core.logger import logger_get
+
+
+CONF_FILE = 'dependencies.json'
+Dependency = Dict[str, List[str]]
+log = logger_get(__name__)
+
+
+def check_dependencies(code_base: Path, theoretical_deps: Path):
+    """
+    Hook for preserving the pre-established solution dependencies.
+    Compares the module dependency matrix computed on code_base to the pre-computed
+    matrix stored in theoretical_deps.
+    """
+    dependencies = _get_current_dependencies(_valid_modules(code_base), code_base, code_base.name)
+    allowed_dependencies = _get_allowed_dependencies(theoretical_deps)
+    if not (ok_deps := _test_dependency(allowed_dependencies, dependencies)):
+        log.error('you changed high level solution dependencies. Intended?')
+    return ok_deps
+
+
+def compute_dependencies(code_base: Path, output_folder: Path, plot: bool = True):
+    """
+    Given a code base, compute the dependencies between its high level modules.
+    Store in output_folder the dependency matrix in txt format and a png of the dependencies.
+    """
+    code_folder = code_base.name
+    modules = _valid_modules(code_base)
+
+    deps: Dependency = _get_current_dependencies(modules, code_base, code_folder)
+
+    for mod, mod_deps in deps.items():
+        with open(output_folder / f'{mod}.py', 'w') as f_stream:
+            f_stream.writelines([f'from .{other_module} import to\n' for other_module in mod_deps])
+
+    with open(output_folder / '__init__.py', 'w') as f_stream:
+        f_stream.writelines([])
+
+    with open(output_folder / f'dependencies_{code_folder}.txt', 'w') as f_stream:
+        f_stream.writelines([f'{dependency}\n' for dependency in _get_dep_matrix(modules, deps)])
+
+    if plot:
+        run(['pydeps', '.', '-T', 'png', '--no-show', '--rmprefix',
+             f'{output_folder.name}.'], cwd=output_folder)
+
+
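Worth pausing on the loop above: rather than running pydeps on the real code base, compute_dependencies writes a tiny throwaway package whose stub modules import exactly the computed dependencies, then runs pydeps on that, so the rendered graph is module level instead of file level. A minimal sketch of the same trick, with a made-up dependency dict (the module names are illustrative, not taken from the package):

```python
from pathlib import Path

# Hypothetical module-level dependency dict, as _get_current_dependencies would build it
deps = {'api': ['db', 'utils'], 'db': ['utils'], 'utils': []}
output_folder = Path('dep_stubs')
output_folder.mkdir(exist_ok=True)

for mod, mod_deps in deps.items():
    # Each stub imports exactly the modules its real counterpart depends on; pydeps
    # parses imports statically, so the imported name `to` never needs to exist.
    (output_folder / f'{mod}.py').write_text(
        ''.join(f'from .{dep} import to\n' for dep in mod_deps))
(output_folder / '__init__.py').write_text('')
# Running `pydeps . -T png --no-show` inside dep_stubs then draws the module level graph.
```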
+def _test_dependency(allowed_deps: Dependency, dependencies: Dependency) -> bool:
+    """
+    For each module stored in a dependencies.json file, check whether the current
+    module dependencies are the same as the configured ones.
+    """
+    for module in dependencies:
+        for dep in allowed_deps[module]:
+            if dep not in dependencies[module]:
+                log.error(f'{dep} no longer imported in {module}. Intended?')
+        for dep in dependencies[module]:
+            if dep not in allowed_deps[module]:
+                log.error(f'{dep} now imported in {module}. Intended?')
+        for dep in dependencies[module]:
+            if module in dependencies[dep] and dep != module:
+                log.error(f'Circular ref created between {module} and {dep}.')
+    return dependencies == allowed_deps
+
+
+def _get_allowed_dependencies(config_path: Path) -> Dependency:
+    """
+    Given the pre-established dependency file path, compute the pre-established modules
+    and their dependencies, seen as an adjacency dict.
+    All the values of a given key are its dependencies.
+    The keys and the values of the dictionary take their labels in
+    the pre-established module list.
+    """
+    raw_lines = list(_safe_read_lines(config_path))
+    raw_matrix = [raw_dependency.split(',') for raw_dependency in raw_lines][1:]
+    modules = [raw_dependency[0] for raw_dependency in raw_matrix]
+    module_dependencies: Dict[str, List[str]] = {
+        module: [modules[idx_other_module] for idx_other_module in range(len(modules))
+                 if raw_matrix[idx_module][idx_other_module + 1] == 'Yes']
+        for idx_module, module in enumerate(modules)
+    }
+
+    return module_dependencies
+
+
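For reference, the matrix format this parser expects (and that _get_dep_matrix, further down, produces) is plain csv: a header row, then one Yes/No row per module. A hypothetical three-module example, parsed the same way as above:

```python
# Hypothetical dependencies matrix; the module names are illustrative
raw = '''module x depends on,api,db,utils
api,No,Yes,Yes
db,No,No,Yes
utils,No,No,No'''

raw_matrix = [line.split(',') for line in raw.splitlines()][1:]
modules = [row[0] for row in raw_matrix]
# Same adjacency-dict construction as _get_allowed_dependencies
allowed = {modules[i]: [modules[j] for j in range(len(modules))
                        if raw_matrix[i][j + 1] == 'Yes']
           for i in range(len(modules))}
assert allowed == {'api': ['db', 'utils'], 'db': ['utils'], 'utils': []}
```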
+def _get_current_dependencies(modules: List[str],
+                              code_base: Path,
+                              code_folder: str) -> Dependency:
+    """
+    Given the pre-established modules, the code_base directory and the name of the code
+    folder, compute the current dependencies as an adjacency dict.
+    All the values of a given key are its dependencies. The keys and the values of
+    the dictionary take their labels in the pre-established module list.
+    """
+    module_dependencies: Dependency = {}
+    for module in modules:
+        module_dependencies[module] = []
+        module_dir = code_base / module
+        for other_module in modules:
+            if other_module in module_dependencies[module]:
+                continue
+            for py_file in _get_recursively_all_files_in_dir(module_dir, 'py'):
+                if _depends_on_module(py_file, other_module, code_folder):
+                    module_dependencies[module].append(other_module)
+                    break
+
+    return module_dependencies
+
+
+def _depends_on_module(file: Path, module: str, code_folder: str) -> bool:
+    """
+    Check if a reference to module is in the imports of the passed python file
+    """
+    return any(
+        (f'from {code_folder}.{module}' in line and 'import' in line)
+        or (line.startswith(f'import {code_folder}.{module}.'))
+        for line in _safe_read_lines(file)
+    )
+
+
+def _safe_read_lines(filename: Path) -> Iterator[str]:
+    """
+    Read all lines in a file, stripping the trailing newline characters
+    """
+    with open(filename, 'r') as f:
+        lines = f.readlines()
+    yield from [line.strip() for line in lines]
+
+
+def _get_recursively_all_files_in_dir(code_folder: Path, extension: str) -> Iterator[Path]:
+    """
+    Given a folder, recursively return all files with the given extension in the folder
+    """
+    yield from code_folder.glob(f'**/*.{extension}')
+
+
+def _valid_folder(folder: Path):
+    """
+    Return True if folder is a python module regroupment to be considered for dependency analysis
+    """
+    return (
+        folder.is_dir()
+        and not folder.name.startswith('.')
+        and not folder.name.startswith('_')
+        and folder.name != 'data'
+    )
+
+
+def _valid_modules(root_folder: Path):
+    """
+    Retrieve the valid solution modules (found at the base level of root_folder)
+    """
+    return sorted([folder.name for folder in root_folder.iterdir() if _valid_folder(folder)])
+
+
+def _get_dep_matrix(modules: List[str], deps: Dependency) -> List[str]:
+    """
+    Retrieve the dependency matrix of the inspected solution in txt format
+    """
+    dependencies = [f'module x depends on,{",".join(modules)}']
+    dependencies.extend(f'{module},' + ','.join([_depends_on(module, other_module, deps)
+                                                 for other_module in modules]) for module in modules)
+
+    return dependencies
+
+
+def _depends_on(module, other_module, deps):
+    """
+    Write the correct entry in the dependency matrix ('Yes' if other_module is in deps of module)
+    """
+    return 'Yes' if other_module in deps[module] else 'No'
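Taken together, the two public entry points lend themselves to a CI or pre-commit gate: regenerate the matrix when a dependency change is intended, fail the build otherwise. A minimal sketch, where both paths are assumptions rather than package conventions:

```python
import sys
from pathlib import Path

from ecodev_core.check_dependencies import check_dependencies

code_base = Path('ecodev_core')                    # hypothetical solution root
deps_file = Path('dependencies_ecodev_core.txt')   # matrix committed alongside the code

# Exit non-zero when the current dependency graph drifts from the committed matrix
sys.exit(0 if check_dependencies(code_base, deps_file) else 1)
```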
ecodev_core/custom_equal.py
@@ -0,0 +1,27 @@
+"""
+Module comparing whether two elements are both None, or both not None and equal
+"""
+from typing import Optional
+
+import numpy as np
+
+
+def custom_equal(element_1: Optional[object], element_2: Optional[object], element_type: type):
+    """
+    Compare whether two elements are both None, or both not None and equal (same type/same value)
+
+    Args:
+        element_1: the first element of the comparison
+        element_2: the second element of the comparison
+        element_type: the expected element type for both elements
+
+    Returns:
+        True if both None, or both not None and equal (same type/same value)
+    """
+    if element_1 is None:
+        return element_2 is None
+
+    if not isinstance(element_1, element_type) or not isinstance(element_2, element_type):
+        return False
+
+    return np.isclose(element_1, element_2) if element_type == float else element_1 == element_2
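A few illustrative calls (not taken from the package's docs), showing the None handling and the np.isclose path for floats:

```python
from ecodev_core.custom_equal import custom_equal

custom_equal(None, None, float)        # True: both None
custom_equal(None, 1.0, float)         # False: only one element is None
custom_equal(0.1 + 0.2, 0.3, float)    # True: floats are compared with np.isclose
custom_equal('a', 'a', int)            # False: neither element has the expected type
```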
ecodev_core/db_connection.py
@@ -0,0 +1,68 @@
+"""
+Module implementing the postgresql connection
+"""
+from typing import Callable
+
+from pydantic import BaseSettings
+from sqlalchemy import delete
+from sqlmodel import create_engine
+from sqlmodel import Session
+from sqlmodel import SQLModel
+
+from ecodev_core.logger import logger_get
+log = logger_get(__name__)
+
+
+class DbSettings(BaseSettings):
+    """
+    Settings class used to connect to the postgresql database
+    """
+    db_host: str
+    db_port: str
+    db_name: str
+    db_username: str
+    db_password: str
+
+    class Config:
+        """
+        Config class specifying the name of the environment file to read
+        """
+        env_file = '.env'
+
+
+DB = DbSettings()
+DB_URL = f'postgresql://{DB.db_username}:{DB.db_password}@{DB.db_host}:{DB.db_port}/{DB.db_name}'
+engine = create_engine(DB_URL)
+
+
+def create_db_and_tables(model: Callable) -> None:
+    """
+    Create all tables based on the schemas declared in core/models, thanks to sqlmodel
+    """
+    log.info(f'Inserting on the fly {model} and all other domain tables')
+    SQLModel.metadata.create_all(engine)
+
+
+def delete_table(model: Callable) -> None:
+    """
+    Delete all rows of the passed model from db
+    """
+    with Session(engine) as session:
+        result = session.execute(delete(model))
+        session.commit()
+        log.info(f'Deleted {result.rowcount} rows')
+
+
+def get_session():
+    """
+    Retrieve the session; used in Depends() attributes of fastapi routes
+    """
+    with Session(engine) as session:
+        yield session
+
+
+def info_message(model: Callable):
+    """
+    Hack to get all models imported in an alembic env.py
+    """
+    log.info(f'hack to get all models imported in an alembic env.py. {model}')
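Since DbSettings is a pydantic BaseSettings, the five fields are read from the environment or from .env; note that DB = DbSettings() runs at import time, so they must be set before this module is imported. A minimal sketch of a matching .env and a fastapi route leaning on get_session (the Item model, route and env values are illustrative, not part of the package):

```python
# .env (hypothetical values):
#   db_host=localhost
#   db_port=5432
#   db_name=ecodev
#   db_username=postgres
#   db_password=secret
from typing import Optional

from fastapi import Depends, FastAPI
from sqlmodel import Field, Session, SQLModel, select

from ecodev_core.db_connection import get_session


class Item(SQLModel, table=True):
    # Illustrative table, not one of the package's models
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str


app = FastAPI()


@app.get('/items')
def list_items(session: Session = Depends(get_session)):
    # get_session yields a Session bound to the module-level engine
    return session.exec(select(Item)).all()
```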
ecodev_core/db_filters.py
@@ -0,0 +1,142 @@
+"""
+Low level db filtering methods
+"""
+from datetime import datetime
+from enum import Enum
+from enum import unique
+from typing import Callable
+from typing import Dict
+
+from sqlalchemy import func
+from sqlalchemy.orm.attributes import InstrumentedAttribute
+from sqlmodel import col
+from sqlmodel.sql.expression import Select
+from sqlmodel.sql.expression import SelectOfScalar
+
+SelectOfScalar.inherit_cache = True  # type: ignore
+Select.inherit_cache = True  # type: ignore
+OPERATORS = ['>=', '<=', '!=', '=', '<', '>', 'contains ']
+
+
+@unique
+class ServerSideFilter(str, Enum):
+    """
+    All possible server side filtering mechanisms
+    """
+    STARTSTR = 'start_str'
+    ILIKESTR = 'ilike_str'
+    STRICTSTR = 'strict_str'
+    LIKESTR = 'like_str'
+    BOOL = 'bool'
+    NUM = 'num'
+
+
+def _filter_start_str_field(field: InstrumentedAttribute,
+                            query: SelectOfScalar,
+                            operator: str,
+                            value: str
+                            ) -> SelectOfScalar:
+    """
+    Add a filter to the passed query for a str like field. The filtering is done by checking
+    whether the field starts with the passed value.
+
+    NB: case-insensitive!
+    """
+    return query.where(func.lower(col(field)).startswith(value.lower())) if value else query
+
+
+def _filter_str_ilike_field(field: InstrumentedAttribute,
+                            query: SelectOfScalar,
+                            operator: str,
+                            value: str
+                            ) -> SelectOfScalar:
+    """
+    Add a filter to the passed query for a str like field. The filtering is done by checking
+    whether the passed value is contained in the db values.
+
+    NB: case-insensitive!
+    """
+    return query.where(col(field).ilike(f'%{value}%')) if value else query
+
+
+def _filter_str_like_field(field: InstrumentedAttribute,
+                           query: SelectOfScalar,
+                           operator: str,
+                           value: str
+                           ) -> SelectOfScalar:
+    """
+    Add a filter to the passed query for a str like field. The filtering is done by checking
+    whether the passed value is contained in the db values (case-sensitive).
+    """
+    return query.where(col(field).contains(value)) if value else query
+
+
+def _filter_strict_str_field(field: InstrumentedAttribute,
+                             query: SelectOfScalar,
+                             operator: str,
+                             value: str
+                             ) -> SelectOfScalar:
+    """
+    Add a filter to the passed query for strict str equality matching.
+    The filtering is done by checking whether the passed value is equal to the db values.
+    """
+    return query.where(col(field) == value) if value else query
+
+
+def _filter_bool_like_field(field: InstrumentedAttribute,
+                            query: SelectOfScalar,
+                            operator: str,
+                            value: str
+                            ) -> SelectOfScalar:
+    """
+    Add a filter to the passed query for a bool like field. The filtering is done by comparing
+    the passed value to the db values.
+    """
+    return query.where(col(field) == value) if value else query
+
+
+def _filter_num_like_field(field: InstrumentedAttribute,
+                           query: SelectOfScalar,
+                           operator: str,
+                           value: str,
+                           is_date: bool = False
+                           ) -> SelectOfScalar:
+    """
+    Add a filter to the passed query for a num like field (even a datetime one, when is_date
+    is set to True). The filtering is done by comparing the passed value to the db values
+    with the passed operator.
+    """
+    if not operator or not value:
+        return query
+
+    if operator == '>=':
+        query = query.where(col(field) >= (_date(value) if is_date else float(value)))
+    elif operator == '<=':
+        query = query.where(col(field) <= (_date(value) if is_date else float(value)))
+    elif operator == '!=':
+        query = query.where(col(field) != (_date(value) if is_date else float(value)))
+    elif operator == '=':
+        query = query.where(col(field) == (_date(value) if is_date else float(value)))
+    elif operator == '>':
+        query = query.where(col(field) > (_date(value) if is_date else float(value)))
+    elif operator == '<':
+        query = query.where(col(field) < (_date(value) if is_date else float(value)))
+
+    return query
+
+
+SERVER_SIDE_FILTERS: Dict[ServerSideFilter, Callable] = {
+    ServerSideFilter.STARTSTR: _filter_start_str_field,
+    ServerSideFilter.STRICTSTR: _filter_strict_str_field,
+    ServerSideFilter.LIKESTR: _filter_str_like_field,
+    ServerSideFilter.ILIKESTR: _filter_str_ilike_field,
+    ServerSideFilter.BOOL: _filter_bool_like_field,
+    ServerSideFilter.NUM: _filter_num_like_field
+}
+
+
+def _date(year: str) -> datetime:
+    """
+    Convert the passed str year to a datetime, to allow filtering on datetime years.
+    """
+    return datetime(year=int(year), month=1, day=1)
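A sketch of how the SERVER_SIDE_FILTERS dispatch table might be used to stack filters onto a query; the Item model and the filter values are assumptions, not from the package:

```python
from typing import Optional

from sqlmodel import Field, SQLModel, select

from ecodev_core.db_filters import SERVER_SIDE_FILTERS, ServerSideFilter


class Item(SQLModel, table=True):
    # Illustrative model, not one of the package's tables
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str


query = select(Item)
# Case-insensitive substring match on a str column (operator is unused for str filters)
query = SERVER_SIDE_FILTERS[ServerSideFilter.ILIKESTR](Item.name, query, '', 'foo')
# Numeric comparison with an explicit operator drawn from OPERATORS
query = SERVER_SIDE_FILTERS[ServerSideFilter.NUM](Item.id, query, '>=', '3')
# rows = session.exec(query).all()
```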
ecodev_core/db_insertion.py
@@ -0,0 +1,108 @@
+"""
+Module implementing functions to insert data into the db
+"""
+from io import BytesIO
+from pathlib import Path
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import List
+from typing import Union
+
+import pandas as pd
+from fastapi import BackgroundTasks
+from fastapi import UploadFile
+from pandas import ExcelFile
+from sqlmodel import Session
+from sqlmodel import SQLModel
+from sqlmodel.sql.expression import SelectOfScalar
+
+from ecodev_core.logger import log_critical
+from ecodev_core.logger import logger_get
+from ecodev_core.pydantic_utils import CustomFrozen
+from ecodev_core.safe_utils import SimpleReturn
+
+
+log = logger_get(__name__)
+
+
+class Insertor(CustomFrozen):
+    """
+    Configuration class used to insert data into the postgresql db.
+
+    Attributes are:
+
+    - reductor: how to create or update a row in db
+    - db_schema: the default constructor of the sqlmodel based class defining the db table
+    - selector: the criteria on which to decide whether to create or update (example: only add
+      a user if a user with the same name is not already present in the db)
+    - convertor: how to convert the raw csv/excel passed by the user to json like db rows
+    - read_excel_file: whether to insert data based on an xlsx (if true) or a csv (if false)
+    """
+    reductor: Callable[[Any, Any], Any]
+    db_schema: Callable
+    selector: Callable[[Any], SelectOfScalar]
+    convertor: Callable[[Union[pd.DataFrame, ExcelFile]], List[Dict]]
+    read_excel_file: bool = True
+
+
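A hypothetical Insertor wiring for a users csv may make the four callables concrete; every name below (User, user_selector, ...) is illustrative rather than part of the package:

```python
from typing import Dict, List, Optional

import pandas as pd
from sqlmodel import Field, SQLModel, select

from ecodev_core.db_insertion import Insertor


class User(SQLModel, table=True):
    # Illustrative table used as db_schema
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str
    email: str


def user_convertor(df: pd.DataFrame) -> List[Dict]:
    # One json-like dict per future db row
    return df.to_dict(orient='records')


def user_selector(row: User):
    # Two users are "the same" if they share a name
    return select(User).where(User.name == row.name)


def user_reductor(in_db: User, new: User) -> User:
    # Update policy: refresh the email of the existing row
    in_db.email = new.email
    return in_db


INSERTOR = Insertor(reductor=user_reductor, db_schema=User, selector=user_selector,
                    convertor=user_convertor, read_excel_file=False)
```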
+def generic_insertion(df_or_xl: Union[pd.DataFrame, ExcelFile, Path],
+                      session: Session,
+                      insertor: Callable[[Union[pd.DataFrame, pd.ExcelFile], Session], None],
+                      background_tasks: Union[BackgroundTasks, None] = None):
+    """
+    Generic insertion of the passed csv/excel data into db
+    """
+    try:
+        if background_tasks:
+            background_tasks.add_task(insertor, df_or_xl, session)
+        else:
+            insertor(df_or_xl, session)
+        return SimpleReturn.route_success()
+    except Exception as error:
+        log_critical(f'Something wrong happened: {error}', log)
+        return SimpleReturn.route_failure(str(error))
+
+
+async def insert_file(file: UploadFile, insertor: Insertor, session: Session) -> None:
+    """
+    Inserts an uploaded file into the database
+    """
+    df_raw = await get_raw_df(file, insertor.read_excel_file)
+    insert_data(df_raw, insertor, session)
+
+
+def insert_data(df: Union[pd.DataFrame, ExcelFile], insertor: Insertor, session: Session) -> None:
+    """
+    Inserts a csv/df into the database
+    """
+    for row in insertor.convertor(df):
+        session.add(create_or_update(session, row, insertor))
+    session.commit()
+
+
+def create_or_update(session: Session, row: Dict, insertor: Insertor) -> SQLModel:
+    """
+    Create a new row in db if the insertor selector does not find an existing row in db.
+    Update the row otherwise.
+    """
+    db_row = insertor.db_schema(**row)
+    if in_db_row := session.exec(insertor.selector(db_row)).first():
+        return insertor.reductor(in_db_row, db_row)
+    return db_row
+
+
+async def get_raw_df(file: UploadFile,
+                     read_excel_file: bool,
+                     sep: str = ',') -> Union[pd.DataFrame, ExcelFile]:
+    """
+    Retrieves the raw data from the uploaded file in pandas format
+    """
+    contents = await file.read()
+    if read_excel_file:
+        return pd.ExcelFile(contents)
+
+    buffer = BytesIO(contents)
+    raw_content = pd.read_csv(buffer, sep=sep)
+    buffer.close()
+    return raw_content
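Finally, a sketch of an upload route gluing the pieces together, reusing the hypothetical INSERTOR from the previous sketch:

```python
from fastapi import Depends, FastAPI, UploadFile
from sqlmodel import Session

from ecodev_core.db_connection import get_session
from ecodev_core.db_insertion import insert_file

app = FastAPI()


@app.post('/upload')
async def upload(file: UploadFile, session: Session = Depends(get_session)):
    # INSERTOR is the illustrative configuration defined in the previous sketch;
    # rows matched by its selector are updated, all others are created.
    await insert_file(file, INSERTOR, session)
    return {'status': 'ok'}
```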