dragon-ml-toolbox 20.2.0__py3-none-any.whl → 20.4.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry, and is provided for informational purposes only.
- {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.4.0.dist-info}/METADATA +1 -1
- dragon_ml_toolbox-20.4.0.dist-info/RECORD +143 -0
- ml_tools/ETL_cleaning/__init__.py +5 -1
- ml_tools/ETL_cleaning/_basic_clean.py +1 -1
- ml_tools/ETL_engineering/__init__.py +5 -1
- ml_tools/GUI_tools/__init__.py +5 -1
- ml_tools/IO_tools/_IO_loggers.py +33 -21
- ml_tools/IO_tools/__init__.py +5 -1
- ml_tools/MICE/__init__.py +8 -2
- ml_tools/MICE/_dragon_mice.py +1 -1
- ml_tools/ML_callbacks/__init__.py +5 -1
- ml_tools/ML_chain/__init__.py +5 -1
- ml_tools/ML_configuration/__init__.py +7 -1
- ml_tools/ML_configuration/_training.py +65 -1
- ml_tools/ML_datasetmaster/__init__.py +5 -1
- ml_tools/ML_datasetmaster/_base_datasetmaster.py +31 -20
- ml_tools/ML_datasetmaster/_datasetmaster.py +26 -9
- ml_tools/ML_datasetmaster/_sequence_datasetmaster.py +38 -23
- ml_tools/ML_evaluation/__init__.py +5 -1
- ml_tools/ML_evaluation/_classification.py +10 -2
- ml_tools/ML_evaluation_captum/__init__.py +5 -1
- ml_tools/ML_finalize_handler/__init__.py +5 -1
- ml_tools/ML_inference/__init__.py +5 -1
- ml_tools/ML_inference_sequence/__init__.py +5 -1
- ml_tools/ML_inference_vision/__init__.py +5 -1
- ml_tools/ML_models/__init__.py +21 -6
- ml_tools/ML_models/_dragon_autoint.py +302 -0
- ml_tools/ML_models/_dragon_gate.py +358 -0
- ml_tools/ML_models/_dragon_node.py +268 -0
- ml_tools/ML_models/_dragon_tabnet.py +255 -0
- ml_tools/ML_models_sequence/__init__.py +5 -1
- ml_tools/ML_models_vision/__init__.py +5 -1
- ml_tools/ML_optimization/__init__.py +11 -3
- ml_tools/ML_optimization/_multi_dragon.py +24 -8
- ml_tools/ML_optimization/_single_dragon.py +47 -67
- ml_tools/ML_optimization/_single_manual.py +1 -1
- ml_tools/ML_scaler/_ML_scaler.py +12 -7
- ml_tools/ML_scaler/__init__.py +5 -1
- ml_tools/ML_trainer/__init__.py +5 -1
- ml_tools/ML_trainer/_base_trainer.py +136 -13
- ml_tools/ML_trainer/_dragon_detection_trainer.py +31 -91
- ml_tools/ML_trainer/_dragon_sequence_trainer.py +24 -74
- ml_tools/ML_trainer/_dragon_trainer.py +24 -85
- ml_tools/ML_utilities/__init__.py +5 -1
- ml_tools/ML_utilities/_inspection.py +44 -30
- ml_tools/ML_vision_transformers/__init__.py +8 -2
- ml_tools/PSO_optimization/__init__.py +5 -1
- ml_tools/SQL/__init__.py +8 -2
- ml_tools/VIF/__init__.py +5 -1
- ml_tools/data_exploration/__init__.py +4 -1
- ml_tools/data_exploration/_cleaning.py +4 -2
- ml_tools/ensemble_evaluation/__init__.py +5 -1
- ml_tools/ensemble_inference/__init__.py +5 -1
- ml_tools/ensemble_learning/__init__.py +5 -1
- ml_tools/excel_handler/__init__.py +5 -1
- ml_tools/keys/__init__.py +5 -1
- ml_tools/keys/_keys.py +1 -1
- ml_tools/math_utilities/__init__.py +5 -1
- ml_tools/optimization_tools/__init__.py +5 -1
- ml_tools/path_manager/__init__.py +8 -2
- ml_tools/plot_fonts/__init__.py +8 -2
- ml_tools/schema/__init__.py +8 -2
- ml_tools/schema/_feature_schema.py +3 -3
- ml_tools/serde/__init__.py +5 -1
- ml_tools/utilities/__init__.py +5 -1
- ml_tools/utilities/_utility_save_load.py +38 -20
- dragon_ml_toolbox-20.2.0.dist-info/RECORD +0 -179
- ml_tools/ETL_cleaning/_imprimir.py +0 -13
- ml_tools/ETL_engineering/_imprimir.py +0 -24
- ml_tools/GUI_tools/_imprimir.py +0 -12
- ml_tools/IO_tools/_imprimir.py +0 -14
- ml_tools/MICE/_imprimir.py +0 -11
- ml_tools/ML_callbacks/_imprimir.py +0 -12
- ml_tools/ML_chain/_imprimir.py +0 -12
- ml_tools/ML_configuration/_imprimir.py +0 -47
- ml_tools/ML_datasetmaster/_imprimir.py +0 -15
- ml_tools/ML_evaluation/_imprimir.py +0 -25
- ml_tools/ML_evaluation_captum/_imprimir.py +0 -10
- ml_tools/ML_finalize_handler/_imprimir.py +0 -8
- ml_tools/ML_inference/_imprimir.py +0 -11
- ml_tools/ML_inference_sequence/_imprimir.py +0 -8
- ml_tools/ML_inference_vision/_imprimir.py +0 -8
- ml_tools/ML_models/_advanced_models.py +0 -1086
- ml_tools/ML_models/_imprimir.py +0 -18
- ml_tools/ML_models_sequence/_imprimir.py +0 -8
- ml_tools/ML_models_vision/_imprimir.py +0 -16
- ml_tools/ML_optimization/_imprimir.py +0 -13
- ml_tools/ML_scaler/_imprimir.py +0 -8
- ml_tools/ML_trainer/_imprimir.py +0 -10
- ml_tools/ML_utilities/_imprimir.py +0 -16
- ml_tools/ML_vision_transformers/_imprimir.py +0 -14
- ml_tools/PSO_optimization/_imprimir.py +0 -10
- ml_tools/SQL/_imprimir.py +0 -8
- ml_tools/VIF/_imprimir.py +0 -10
- ml_tools/data_exploration/_imprimir.py +0 -32
- ml_tools/ensemble_evaluation/_imprimir.py +0 -14
- ml_tools/ensemble_inference/_imprimir.py +0 -9
- ml_tools/ensemble_learning/_imprimir.py +0 -10
- ml_tools/excel_handler/_imprimir.py +0 -13
- ml_tools/keys/_imprimir.py +0 -11
- ml_tools/math_utilities/_imprimir.py +0 -11
- ml_tools/optimization_tools/_imprimir.py +0 -13
- ml_tools/path_manager/_imprimir.py +0 -15
- ml_tools/plot_fonts/_imprimir.py +0 -8
- ml_tools/schema/_imprimir.py +0 -10
- ml_tools/serde/_imprimir.py +0 -10
- ml_tools/utilities/_imprimir.py +0 -18
- {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.4.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.4.0.dist-info}/licenses/LICENSE +0 -0
- {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.4.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
- {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.4.0.dist-info}/top_level.txt +0 -0
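Two patterns recur throughout this release. First, the per-module `_imprimir.py` files are deleted in favor of a shared `_imprimir_disponibles` helper in `ml_tools._core`, which each subpackage now exposes through a module-level `info()` function. Second, the dataset makers gain a `verbose: int` parameter that gates their logging. A minimal sketch of the `info()` pattern as it appears in each rewritten `__init__.py` (the body mirrors the added lines in the hunks below; the helper's output format is not shown in this diff and is assumed to be a simple listing):

# Sketch of the new __init__.py layout introduced in 20.4.0.
from .._core import _imprimir_disponibles  # shared helper replacing _imprimir.py

__all__ = [
    "DragonSequenceInferenceHandler",
]

def info():
    # Prints this subpackage's public names; exact formatting is an
    # assumption, since _imprimir_disponibles is not shown in this diff.
    _imprimir_disponibles(__all__)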
ml_tools/ML_datasetmaster/_datasetmaster.py
CHANGED

@@ -35,7 +35,8 @@ class DragonDataset(_BaseDatasetMaker):
                  validation_size: float = 0.2,
                  test_size: float = 0.1,
                  class_map: Optional[dict[str,int]]=None,
-                 random_state: int = 42):
+                 random_state: int = 42,
+                 verbose: int = 2):
         """
         Args:
             pandas_df (pandas.DataFrame):
@@ -59,6 +60,11 @@ class DragonDataset(_BaseDatasetMaker):
                 - "none": Do not scale data (e.g., for TabularTransformer).
                 - DragonScaler instance: Use a pre-fitted scaler to transform data.
             target_scaler: Strategy for target scaling. ONLY applies for "regression" tasks.
+            verbose (int): Verbosity level for logging.
+                - 0: Errors only
+                - 1: Warnings
+                - 2: Info
+                - 3: Detailed process info
         """
         super().__init__()

@@ -130,10 +136,11 @@ class DragonDataset(_BaseDatasetMaker):

         if _apply_f_scaling:
             X_train_final, X_val_final, X_test_final = self._prepare_feature_scaler(
-                X_train, y_train, X_val, X_test, label_dtype, schema
+                X_train, y_train, X_val, X_test, label_dtype, schema, verbose=verbose
             )
         else:
-            _LOGGER.info("Features have not been scaled as specified.")
+            if verbose >= 2:
+                _LOGGER.info("Features have not been scaled as specified.")
             X_train_final, X_val_final, X_test_final = X_train.to_numpy(), X_val.to_numpy(), X_test.to_numpy()

         # --- 5. Scale Targets (Regression Only) ---
@@ -152,7 +159,7 @@ class DragonDataset(_BaseDatasetMaker):
             raise ValueError()

         if _apply_t_scaling:
-            y_train_final, y_val_final, y_test_final = self._prepare_target_scaler(y_train, y_val, y_test)
+            y_train_final, y_val_final, y_test_final = self._prepare_target_scaler(y_train, y_val, y_test, verbose=verbose)
         else:
             y_train_final = y_train.to_numpy() if isinstance(y_train, (pandas.Series, pandas.DataFrame)) else y_train
             y_val_final = y_val.to_numpy() if isinstance(y_val, (pandas.Series, pandas.DataFrame)) else y_val
@@ -174,6 +181,8 @@ class DragonDataset(_BaseDatasetMaker):
         # --- 8. Set class map if classification ---
         if self.kind != MLTaskKeys.REGRESSION:
             if class_map is None:
+                if verbose >= 1:
+                    _LOGGER.warning("No class map provided for classification task at initialization. Use `.set_class_map()`.")
                 self.class_map = dict()
             else:
                 self.set_class_map(class_map)
@@ -239,7 +248,8 @@ class DragonDatasetMulti(_BaseDatasetMaker):
                  target_scaler: Union[Literal["fit"], Literal["none"], DragonScaler] = "fit",
                  validation_size: float = 0.2,
                  test_size: float = 0.1,
-                 random_state: int = 42):
+                 random_state: int = 42,
+                 verbose: int = 2):
         """
         Args:
             pandas_df (pandas.DataFrame):
@@ -264,6 +274,11 @@ class DragonDatasetMulti(_BaseDatasetMaker):
                 - "none": Do not scale data (e.g., for TabularTransformer).
                 - DragonScaler instance: Use a pre-fitted scaler to transform data.
             target_scaler: Strategy for target scaling (Regression only).
+            verbose (int): Verbosity level for logging.
+                - 0: Errors only
+                - 1: Warnings
+                - 2: Info
+                - 3: Detailed process info
         """
         super().__init__()

@@ -296,7 +311,8 @@ class DragonDatasetMulti(_BaseDatasetMaker):

         schema_plus_targets = feature_cols_set.union(target_cols_set)
         if (all_cols_set - schema_plus_targets):
-            _LOGGER.warning(f"Columns in DataFrame but not in schema or targets: {list(all_cols_set - schema_plus_targets)}")
+            if verbose >= 1:
+                _LOGGER.warning(f"Columns in DataFrame but not in schema or targets: {list(all_cols_set - schema_plus_targets)}")

         if (schema_plus_targets - all_cols_set):
             _LOGGER.error(f"Columns in schema/targets but not in DataFrame: {list(schema_plus_targets - all_cols_set)}")
@@ -335,10 +351,11 @@ class DragonDatasetMulti(_BaseDatasetMaker):

         if _apply_f_scaling:
             X_train_final, X_val_final, X_test_final = self._prepare_feature_scaler(
-                X_train, y_train, X_val, X_test, label_dtype, schema
+                X_train, y_train, X_val, X_test, label_dtype, schema, verbose=verbose
             )
         else:
-            _LOGGER.info("Features have not been scaled as specified.")
+            if verbose >= 2:
+                _LOGGER.info("Features have not been scaled as specified.")
             X_train_final, X_val_final, X_test_final = X_train.to_numpy(), X_val.to_numpy(), X_test.to_numpy()

         # --- 5. Scale Targets ---
@@ -357,7 +374,7 @@ class DragonDatasetMulti(_BaseDatasetMaker):
             raise ValueError()

         if _apply_t_scaling:
-            y_train_final, y_val_final, y_test_final = self._prepare_target_scaler(y_train, y_val, y_test)
+            y_train_final, y_val_final, y_test_final = self._prepare_target_scaler(y_train, y_val, y_test, verbose=verbose)
         else:
             y_train_final, y_val_final, y_test_final = y_train.to_numpy(), y_val.to_numpy(), y_test.to_numpy()
     else:
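Taken together, these hunks thread a single `verbose` knob through `DragonDataset` and `DragonDatasetMulti`. A hedged usage sketch follows; only the parameters visible in this diff are shown, the import path is assumed, and any other required constructor arguments (schema, targets, task kind) are elided:

from ml_tools.ML_datasetmaster import DragonDataset  # assumed import path

dataset = DragonDataset(
    df,  # a prepared pandas.DataFrame; remaining required arguments are elided
    validation_size=0.2,
    test_size=0.1,
    class_map={"negative": 0, "positive": 1},  # silences the new classification warning
    random_state=42,
    verbose=1,  # 0: errors, 1: +warnings, 2: +info (default), 3: +detailed process info
)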
ml_tools/ML_datasetmaster/_sequence_datasetmaster.py
CHANGED

@@ -39,7 +39,8 @@ class DragonDatasetSequence:
                  data: Union[pandas.DataFrame, pandas.Series, numpy.ndarray],
                  sequence_length: int,
                  validation_size: float = 0.2,
-                 test_size: float = 0.1):
+                 test_size: float = 0.1,
+                 verbose: int = 2):
         """
         Initializes the dataset manager and automatically processes the data.

@@ -59,6 +60,11 @@ class DragonDatasetSequence:
             sequence_length (int): The number of time steps in each input window (X).
             validation_size (float): The fraction of data to hold out for validation.
             test_size (float): The fraction of data to hold out for testing.
+            verbose (int): Verbosity level for logging.
+                - 0: Errors only
+                - 1: Warnings
+                - 2: Info
+                - 3: Detailed process info
         """
         self._train_dataset = None
         self._test_dataset = None
@@ -98,23 +104,24 @@ class DragonDatasetSequence:
         self._are_windows_generated = False

         # Automation
-        self._split_data(validation_size=validation_size, test_size=test_size)
-        self._normalize_data()
-        self._generate_windows()
+        self._split_data(validation_size=validation_size, test_size=test_size, verbose=verbose)
+        self._normalize_data(verbose=verbose)
+        self._generate_windows(verbose=verbose)

-    def _split_data(self, validation_size: float = 0.2, test_size: float = 0.1) -> None:
+    def _split_data(self, validation_size: float = 0.2, test_size: float = 0.1, verbose: int = 3) -> None:
         """
         Splits the sequence chronologically into training, validation, and testing portions.

         To prevent windowing errors, the validation and test sets include an overlap of `sequence_length` from the preceding data.
         """
         if self._is_split:
-            _LOGGER.warning("Data has already been split.")
+            if verbose >= 1:
+                _LOGGER.warning("Data has already been split.")
             return

         if (validation_size + test_size) >= 1.0:
             _LOGGER.error(f"The sum of validation_size ({validation_size}) and test_size ({test_size}) must be less than 1.0.")
-            raise ValueError(
+            raise ValueError()

         total_size = len(self.sequence)

@@ -139,9 +146,10 @@ class DragonDatasetSequence:
         self.test_time_axis = self.time_axis[test_split_idx:]

         self._is_split = True
-        _LOGGER.info(f"Sequence split into training ({len(self.train_sequence)}), validation ({len(self.val_sequence)}), and testing ({len(self.test_sequence)}) points.")
+        if verbose >= 2:
+            _LOGGER.info(f"Sequence split into training ({len(self.train_sequence)}), validation ({len(self.val_sequence)}), and testing ({len(self.test_sequence)}) points.")

-    def _normalize_data(self) -> None:
+    def _normalize_data(self, verbose: int = 3) -> None:
         """
         Normalizes the sequence data using DragonScaler. Must be called AFTER splitting to prevent data leakage from the test set.
         """
@@ -150,7 +158,8 @@ class DragonDatasetSequence:
             raise RuntimeError()

         if self.scaler:
-            _LOGGER.warning("Data has already been normalized.")
+            if verbose >= 1:
+                _LOGGER.warning("Data has already been normalized.")
             return

         # 1. DragonScaler requires a Dataset to fit. Create a temporary one.
@@ -163,8 +172,9 @@ class DragonDatasetSequence:

         # 2. Fit the DragonScaler on the temporary training dataset.
         # The sequence is a single feature, so its index is [0].
-        _LOGGER.info("Fitting DragonScaler on the training data...")
-        self.scaler = DragonScaler.fit(temp_train_ds, continuous_feature_indices=[0])
+        if verbose >= 3:
+            _LOGGER.info("Fitting DragonScaler on the training data...")
+        self.scaler = DragonScaler.fit(temp_train_ds, continuous_feature_indices=[0], verbose=verbose)

         # 3. Transform sequences using the fitted scaler.
         # The transform method requires a tensor, so we convert, transform, and convert back.
@@ -177,9 +187,10 @@ class DragonDatasetSequence:
         self.test_sequence = self.scaler.transform(test_tensor).numpy().flatten()

         self._is_normalized = True
-        _LOGGER.info("Sequence data normalized using DragonScaler.")
+        if verbose >= 2:
+            _LOGGER.info("Sequence data normalized using DragonScaler.")

-    def _generate_windows(self) -> None:
+    def _generate_windows(self, verbose: int = 3) -> None:
         """
         Generates overlapping windows for features and labels.
         """
@@ -192,12 +203,13 @@ class DragonDatasetSequence:
             raise RuntimeError()

         if self._are_windows_generated:
-            _LOGGER.warning("Windows have already been generated.")
+            if verbose >= 1:
+                _LOGGER.warning("Windows have already been generated.")
             return

-        self._train_dataset = self._create_windowed_dataset(self.train_sequence) # type: ignore
-        self._val_dataset = self._create_windowed_dataset(self.val_sequence) # type: ignore
-        self._test_dataset = self._create_windowed_dataset(self.test_sequence) # type: ignore
+        self._train_dataset = self._create_windowed_dataset(self.train_sequence, verbose=verbose) # type: ignore
+        self._val_dataset = self._create_windowed_dataset(self.val_sequence, verbose=verbose) # type: ignore
+        self._test_dataset = self._create_windowed_dataset(self.test_sequence, verbose=verbose) # type: ignore

         # attach feature scaler and target scaler to datasets
         if self.scaler is not None:
@@ -207,13 +219,15 @@ class DragonDatasetSequence:
                 ds._target_scaler = self.scaler # type: ignore

         self._are_windows_generated = True
-        _LOGGER.info("Feature and label windows generated for train, validation, and test sets.")
+        if verbose >= 2:
+            _LOGGER.info("Feature and label windows generated for train, validation, and test sets.")

-    def _create_windowed_dataset(self, data: numpy.ndarray) -> Dataset:
+    def _create_windowed_dataset(self, data: numpy.ndarray, verbose: int = 3) -> Dataset:
         """Efficiently creates windowed features and labels using numpy."""
         if len(data) <= self.sequence_length:
             # Validation/Test sets of size 0 might be passed
-            _LOGGER.warning(f"Data length ({len(data)}) is not greater than sequence_length ({self.sequence_length}). Cannot create windows. Returning empty dataset.")
+            if verbose >= 1:
+                _LOGGER.warning(f"Data length ({len(data)}) is not greater than sequence_length ({self.sequence_length}). Cannot create windows. Returning empty dataset.")
             return _PytorchDataset(numpy.array([]), numpy.array([]), labels_dtype=torch.float32)

         # Define a generic name for the univariate feature
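The `_create_windowed_dataset` docstring above describes efficient windowing with numpy, but the implementation itself is not part of this diff. The following standalone sketch illustrates the idea for a univariate series, where each window of `sequence_length` steps is paired with the value that immediately follows it:

import numpy

def make_windows(data: numpy.ndarray, sequence_length: int) -> tuple[numpy.ndarray, numpy.ndarray]:
    # One window per label; the label is the value right after its window.
    n_windows = len(data) - sequence_length
    features = numpy.stack([data[i:i + sequence_length] for i in range(n_windows)])
    labels = data[sequence_length:]
    return features, labels

X, y = make_windows(numpy.arange(10.0), sequence_length=3)
# X.shape == (7, 3); y.shape == (7,); X[0] is [0., 1., 2.] and y[0] is 3.0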
@@ -253,7 +267,7 @@ class DragonDatasetSequence:
                                 feature_names=f_names,
                                 target_names=t_names)

-    def plot_splits(self, save_dir: Union[str, Path]):
+    def plot_splits(self, save_dir: Union[str, Path], verbose: int = 3) -> None:
         """Plots the training, validation and testing data."""
         if not self._is_split:
             _LOGGER.error("Cannot plot before splitting data.")
@@ -289,7 +303,8 @@ class DragonDatasetSequence:

         plt.tight_layout()
         plt.savefig(full_path)
-        _LOGGER.info(f"📈 Sequence data splits saved as '{full_path.name}'.")
+        if verbose >= 2:
+            _LOGGER.info(f"📈 Sequence data splits saved as '{full_path.name}'.")
         plt.close()

     def get_datasets(self) -> tuple[Dataset, Dataset, Dataset]:
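The sequence maker's public surface after this change, as a hedged sketch: the constructor arguments, `get_datasets`, and `plot_splits` signatures are taken from the hunks above, while the import path is an assumption.

import numpy
from ml_tools.ML_datasetmaster import DragonDatasetSequence  # assumed import path

series = numpy.sin(numpy.linspace(0.0, 20.0, 500))
ds = DragonDatasetSequence(
    data=series,
    sequence_length=30,
    validation_size=0.2,
    test_size=0.1,
    verbose=1,  # warnings only; the default of 2 also logs split/normalize/window info
)
train_ds, val_ds, test_ds = ds.get_datasets()
ds.plot_splits(save_dir="plots", verbose=0)  # new verbose parameter silences the save log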
ml_tools/ML_evaluation/__init__.py
CHANGED

@@ -28,7 +28,7 @@ from ._vision import (
     object_detection_metrics
 )

-from
+from .._core import _imprimir_disponibles


 __all__ = [
@@ -51,3 +51,7 @@ __all__ = [
     "segmentation_metrics",
     "object_detection_metrics",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
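On the consumer side, each subpackage can now describe itself. A hedged sketch; the printed format depends on `_imprimir_disponibles`, which is not shown in this diff:

import ml_tools.ML_evaluation as ML_evaluation

ML_evaluation.info()  # prints the public names in __all__ via the shared helper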
ml_tools/ML_evaluation/_classification.py
CHANGED

@@ -329,7 +329,11 @@ def classification_metrics(save_dir: Union[str, Path],
     fig_roc, ax_roc = plt.subplots(figsize=CLASSIFICATION_PLOT_SIZE, dpi=DPI_value)
     ax_roc.plot(fpr, tpr, label=f'AUC = {auc:.2f}', color=format_config.ROC_PR_line)
     ax_roc.plot([0, 1], [0, 1], 'k--')
-    ax_roc.set_title('Receiver Operating Characteristic', pad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size + 2)
+    # use "ROC" if extra title, else use "Receiver Operating Characteristic" title
+    if plot_title.strip():
+        ax_roc.set_title(f'ROC{plot_title}', pad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size + 2)
+    else:
+        ax_roc.set_title(f'Receiver Operating Characteristic', pad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size + 2)
     ax_roc.set_xlabel('False Positive Rate', labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size)
     ax_roc.set_ylabel('True Positive Rate', labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size)

@@ -351,7 +355,11 @@ def classification_metrics(save_dir: Union[str, Path],
     ap_score = average_precision_score(y_true_binary, y_score)
     fig_pr, ax_pr = plt.subplots(figsize=CLASSIFICATION_PLOT_SIZE, dpi=DPI_value)
     ax_pr.plot(recall, precision, label=f'Avg Precision = {ap_score:.2f}', color=format_config.ROC_PR_line)
-    ax_pr.set_title('Precision-Recall Curve', pad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size + 2)
+    # Use "PR Curve" if extra title, else use "Precision-Recall Curve" title
+    if plot_title.strip():
+        ax_pr.set_title(f'PR Curve{plot_title}', pad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size + 2)
+    else:
+        ax_pr.set_title(f'Precision-Recall Curve', pad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size + 2)
     ax_pr.set_xlabel('Recall', labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size)
     ax_pr.set_ylabel('Precision', labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=format_config.font_size)

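The new title logic is the same in both hunks: a non-empty `plot_title` swaps the long default title for a short prefix plus the extra text, which implies callers supply their own leading separator. Distilled into a standalone, hedged sketch (not the library function itself):

def _curve_title(plot_title: str, short: str, full: str) -> str:
    # Mirrors the added branches: short prefix when an extra title is
    # given, the full curve name otherwise.
    return f"{short}{plot_title}" if plot_title.strip() else full

assert _curve_title(" - Test Set", "ROC", "Receiver Operating Characteristic") == "ROC - Test Set"
assert _curve_title("", "PR Curve", "Precision-Recall Curve") == "Precision-Recall Curve"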
ml_tools/ML_evaluation_captum/__init__.py
CHANGED

@@ -4,7 +4,7 @@ from ._ML_evaluation_captum import (
     captum_segmentation_heatmap
 )

-from
+from .._core import _imprimir_disponibles


 __all__ = [
@@ -12,3 +12,7 @@ __all__ = [
     "captum_image_heatmap",
     "captum_segmentation_heatmap"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/ML_inference/__init__.py
CHANGED

@@ -11,7 +11,7 @@ from ._multi_inference import (
     multi_inference_classification,
 )

-from
+from .._core import _imprimir_disponibles


 __all__ = [
@@ -20,3 +20,7 @@ __all__ = [
     "multi_inference_regression",
     "multi_inference_classification"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/ML_inference_sequence/__init__.py
CHANGED

@@ -2,9 +2,13 @@ from ._sequence_inference import (
     DragonSequenceInferenceHandler
 )

-from
+from .._core import _imprimir_disponibles


 __all__ = [
     "DragonSequenceInferenceHandler"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/ML_inference_vision/__init__.py
CHANGED

@@ -2,9 +2,13 @@ from ._vision_inference import (
     DragonVisionInferenceHandler
 )

-from
+from .._core import _imprimir_disponibles


 __all__ = [
     "DragonVisionInferenceHandler",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/ML_models/__init__.py
CHANGED
@@ -4,16 +4,27 @@ from ._mlp_attention import (
     DragonMultiHeadAttentionNet
 )

-from .
-    DragonGateModel
-
-
+from ._dragon_gate import (
+    DragonGateModel
+)
+
+from ._dragon_node import (
+    DragonNodeModel
+)
+
+from ._dragon_autoint import (
+    DragonAutoInt
+)
+
+from ._dragon_tabnet import (
     DragonTabNet
 )

-from ._dragon_tabular import
+from ._dragon_tabular import (
+    DragonTabularTransformer
+)

-from
+from .._core import _imprimir_disponibles


 __all__ = [
@@ -30,3 +41,7 @@ __all__ = [
     "DragonTabNet",
 ]

+
+def info():
+    _imprimir_disponibles(__all__)
+
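With `_advanced_models.py` removed and the four architectures split into dedicated modules, imports still resolve through the package. A hedged sketch (the names come from the imports added above; constructor signatures are not part of this diff):

from ml_tools.ML_models import (
    DragonAutoInt,    # new module _dragon_autoint.py
    DragonGateModel,  # new module _dragon_gate.py
    DragonNodeModel,  # new module _dragon_node.py
    DragonTabNet,     # new module _dragon_tabnet.py
)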