pjdev-sqlmodel 4.3.0__tar.gz → 4.3.2__tar.gz
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the versions exactly as they appear in that registry.
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/PKG-INFO +1 -1
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/__about__.py +1 -1
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/utilities.py +113 -33
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/.gitignore +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/LICENSE.txt +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/README.md +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/pyproject.toml +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/__init__.py +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/models/__init__.py +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/models/db_models.py +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/models/settings_models.py +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/settings_service.py +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/sqlmodel_service.py +0 -0
- {pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/tests/__init__.py +0 -0
{pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pjdev-sqlmodel
-Version: 4.3.0
+Version: 4.3.2
 Project-URL: Documentation, https://gitlab.purplejay.net/keystone/python
 Project-URL: Issues, https://gitlab.purplejay.net/keystone/python/issues
 Project-URL: Source, https://gitlab.purplejay.net/keystone/python
{pjdev_sqlmodel-4.3.0 → pjdev_sqlmodel-4.3.2}/src/pjdev_sqlmodel/utilities.py

@@ -8,6 +8,7 @@ import pandas as pd
 from openpyxl.reader.excel import load_workbook
 from loguru import logger
 from sqlalchemy import Engine
+from sqlmodel import select

 from pjdev_sqlmodel import session_context, sqlmodel_service
 from pjdev_sqlmodel.models import ModelBase
@@ -15,10 +16,15 @@ from pjdev_sqlmodel.models import ModelBase
 T = TypeVar("T", bound=ModelBase)


-def get_files_in_directory(directory: Path) -> List[Path]:
-
-        f for f in directory.glob("
+def get_files_in_directory(directory: Path, file_name='*', force_presence=False) -> List[Path]:
+    file_obj = [f for f in directory.glob(f"**/{file_name}.xlsx") if not f.name.startswith("~$")] + [
+        f for f in directory.glob(f"**/{file_name}.csv")
     ]
+    if force_presence and len(file_obj) == 0:
+        logger.error(
+            f'Failed to find files matching the {directory}/{file_name} pattern and presence is required. Exiting...')
+        exit(1)
+    return file_obj


 def get_csv_columns(file_path):
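The widened `get_files_in_directory` signature lets callers narrow the glob to a file-name pattern and abort when nothing matches. A minimal usage sketch, assuming the function is imported from `pjdev_sqlmodel.utilities`; the directory and pattern below are illustrative, not part of the package:

```python
from pathlib import Path

from pjdev_sqlmodel.utilities import get_files_in_directory

# Collect roster*.xlsx and roster*.csv anywhere under ./imports, skipping
# Excel lock files (~$...); logs an error and exits if nothing is found.
files = get_files_in_directory(Path("./imports"), file_name="roster*", force_presence=True)
```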
@@ -54,7 +60,7 @@ def load_csv_data(model_type: Type[T], data_files: List[Path]) -> None:
         f
         for f in data_files
         if f.name.endswith(".csv")
-
+        and len(set(cols).difference(set(get_csv_columns(f)))) == 0
     ]

     data: List[model_type] = []
@@ -69,10 +75,11 @@ def load_csv_data(model_type: Type[T], data_files: List[Path]) -> None:


 def load_excel_data(
-
-
-
-
+    model_type: Type[T],
+    data_files: List[Path],
+    header_ndx: int = 0,
+    sheet_name: str | int = 0,
+    col_range: Optional[str] = None,
 ) -> None:
     fields = model_type.model_fields.keys()
     cols = [
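With the added `sheet_name` parameter, `load_excel_data` can target a specific worksheet instead of always reading the first one. A hedged sketch; `Employee` and its module are hypothetical placeholders for a project-defined `ModelBase` table class, and the paths and sheet title are illustrative:

```python
from pathlib import Path

from myproject.models import Employee  # hypothetical SQLModel table class
from pjdev_sqlmodel.utilities import get_files_in_directory, load_excel_data

# Read rows from the "Staff" worksheet of every employees.xlsx under ./imports.
files = get_files_in_directory(Path("./imports"), file_name="employees")
load_excel_data(Employee, files, header_ndx=0, sheet_name="Staff")
```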
@@ -87,10 +94,10 @@ def load_excel_data(
         f
         for f in data_files
         if f.name.endswith(".xlsx")
-
+        and len(
             set(cols).difference(set(get_excel_columns(f, header_ndx + 1, col_range)))
         )
-
+        == 0
     ]

     data: List[model_type] = []
@@ -101,7 +108,7 @@ def load_excel_data(
     )

     for file in filtered_files:
-        df = __read_excel(file, cols, header_ndx)
+        df = __read_excel(file=file, cols=cols, header_ndx=header_ndx, sheet_name=sheet_name)
         data.extend(__convert_to_models(file.name, df, model_type))

     with session_context() as session:
@@ -111,7 +118,7 @@ def load_excel_data(


 def __convert_to_models(
-
+    filename: str, df: pd.DataFrame, model_type: Type[T]
 ) -> List[T]:
     data: List[model_type] = []
     try:
@@ -125,10 +132,10 @@ def __convert_to_models(


 def load_raw_csv_data(
-
-
-
-
+    filename: Path,
+    table_name: str,
+    data_type_map: Optional[Dict[str, Type]] = None,
+    engine: Optional[Engine] = None,
 ) -> None:
     df = __read_csv(filename, data_type_map=data_type_map)

@@ -146,13 +153,14 @@ def load_raw_csv_data(


 def load_raw_excel_data(
-
-
-
-
-
+    filename: Path,
+    table_name: str,
+    header_ndx: int = 0,
+    sheet_name: str | int = 0,
+    data_type_map: Optional[Dict[str, Type]] = None,
+    engine: Optional[Engine] = None,
 ) -> None:
-    df = __read_excel(filename, header_ndx=header_ndx, data_type_map=data_type_map)
+    df = __read_excel(filename, header_ndx=header_ndx, data_type_map=data_type_map, sheet_name=sheet_name)

     if df is None:
         return
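`load_raw_excel_data` gains the same `sheet_name` parameter and forwards it to the underlying Excel reader, so a raw table can be loaded from a named worksheet. A short sketch with illustrative file, table, and sheet names:

```python
from pathlib import Path

from pjdev_sqlmodel.utilities import load_raw_excel_data

# Load the "FY25" worksheet of budget.xlsx into a table named raw_budget.
load_raw_excel_data(
    filename=Path("./imports/budget.xlsx"),
    table_name="raw_budget",
    sheet_name="FY25",
)
```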
@@ -167,10 +175,80 @@ def load_raw_excel_data(
     )


+def convert_table_to_df(InputTable: type[BaseModel]):
+    with session_context() as session:
+        statement = select(InputTable)
+        results = session.exec(statement).all()
+
+        headers = []
+        field_names = []
+        for name, annotation in InputTable.__annotations__.items():
+            field = InputTable.__fields__[name]
+            alias = field.alias if field.alias else name
+            headers.append(alias)
+            field_names.append(name)
+
+        data = []
+        for res in results:
+            new_obj = {}
+            for i in range(len(headers)):
+                header = headers[i]
+                field_name = field_names[i]
+                new_obj[header] = getattr(res, field_name)
+            data.append(new_obj)
+        df = pd.DataFrame(data)
+        return df
+
+
+def convert_table_to_csv(InputTable: type[BaseModel], file: Path):
+    df = convert_table_to_df(InputTable)
+    df.to_csv(file, index=False)
+
+
+def export_to_sheet(InputTable: type[BaseModel], wb: Workbook, sheet_name: str, table_name: str = None,
+                    hide_sheet=False):
+    df = convert_table_to_df(InputTable)
+    ws = wb.create_sheet(title=sheet_name)
+    if hide_sheet:
+        ws.sheet_state = 'hidden'
+
+    if table_name:
+        ws.merge_cells(start_row=1, start_column=1, end_row=1, end_column=len(df.columns))
+        cell = ws.cell(row=1, column=1)
+        cell.value = table_name
+        cell.font = Font(bold=True)
+        cell.alignment = Alignment(horizontal="center")
+        start_row = 2
+        ws.row_dimensions[1].height = 40.5
+        ws.row_dimensions[2].height = 30.75
+    else:
+        start_row = 1
+        ws.row_dimensions[1].height = 30.75
+
+    for r_idx, row in enumerate(dataframe_to_rows(df, index=False, header=True), start=start_row):
+        for c_idx, value in enumerate(row, start=1):
+            cell = ws.cell(row=r_idx, column=c_idx, value=value)
+            if r_idx == start_row:
+                cell.font = Font(bold=True)
+                cell.alignment = Alignment(horizontal="center")
+
+    for col_idx in range(1, len(df.columns) + 1):
+        column_letter = get_column_letter(col_idx)
+        max_length = 0
+        for cell in ws[column_letter]:
+            try:
+                if cell.value:
+                    max_length = max(max_length, len(str(cell.value)))
+            except:
+                pass
+        adjusted_width = (max_length + 2)
+        ws.column_dimensions[column_letter].width = adjusted_width
+
+
 def __read_csv(
-
-
-
+    file: Path | str,
+    cols: Optional[List[str]] = None,
+    data_type_map: Optional[Dict[str, Type]] = None,
 ) -> pd.DataFrame:
     return pd.read_csv(
         file, engine="pyarrow", usecols=cols, na_filter=False, dtype=data_type_map
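The new `convert_table_to_df`, `convert_table_to_csv`, and `export_to_sheet` helpers read a whole table back through a session and dump it to a pandas DataFrame, a CSV file, or a styled openpyxl worksheet. A hedged usage sketch; `Employee` and its module are again hypothetical placeholders for a project-defined table class, and the file names are illustrative:

```python
from pathlib import Path

from openpyxl import Workbook

from myproject.models import Employee  # hypothetical SQLModel table class
from pjdev_sqlmodel.utilities import convert_table_to_csv, export_to_sheet

# Dump the table to CSV, using the model's field aliases as column headers.
convert_table_to_csv(Employee, Path("./exports/employees.csv"))

# Export the same table to a bold-headed, width-adjusted worksheet.
wb = Workbook()
wb.remove(wb.active)  # drop the default empty sheet created by openpyxl
export_to_sheet(Employee, wb, sheet_name="Employees", table_name="Employee Roster")
wb.save("./exports/tables.xlsx")
```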
@@ -178,14 +256,16 @@ def __read_csv(


 def __read_excel(
-
-
-
-
+    file: Path | str,
+    sheet_name: str | int = 0,
+    cols: Optional[List[str]] = None,
+    header_ndx: int = 0,
+    data_type_map: Optional[Dict[str, Type]] = None,
 ) -> pd.DataFrame:
     return pd.read_excel(
         io=file,
         usecols=cols,
+        sheet_name=sheet_name,
         na_filter=False,
         header=header_ndx,
         engine="calamine",
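Note that the `sheet_name` added to `__read_excel` is handed straight to `pandas.read_excel`, so it accepts either a zero-based sheet index or a worksheet title; the default of `0` keeps the previous behaviour of reading the first sheet.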
@@ -194,10 +274,10 @@ def __read_excel(


 def convert_to_csv(
-
-
-
-
+    data: List[BaseModel],
+    col_mapping_tuple: Tuple[List[str], Dict[str, str], List[str]],
+    filename: Path,
+    index=False,
 ) -> None:
     include_set, col_mapping, cols = col_mapping_tuple
     dict_data = [