dragon-ml-toolbox 20.2.0__py3-none-any.whl → 20.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/METADATA +1 -1
  2. dragon_ml_toolbox-20.3.0.dist-info/RECORD +143 -0
  3. ml_tools/ETL_cleaning/__init__.py +5 -1
  4. ml_tools/ETL_cleaning/_basic_clean.py +1 -1
  5. ml_tools/ETL_engineering/__init__.py +5 -1
  6. ml_tools/GUI_tools/__init__.py +5 -1
  7. ml_tools/IO_tools/_IO_loggers.py +12 -4
  8. ml_tools/IO_tools/__init__.py +5 -1
  9. ml_tools/MICE/__init__.py +8 -2
  10. ml_tools/MICE/_dragon_mice.py +1 -1
  11. ml_tools/ML_callbacks/__init__.py +5 -1
  12. ml_tools/ML_chain/__init__.py +5 -1
  13. ml_tools/ML_configuration/__init__.py +7 -1
  14. ml_tools/ML_configuration/_training.py +65 -1
  15. ml_tools/ML_datasetmaster/__init__.py +5 -1
  16. ml_tools/ML_datasetmaster/_base_datasetmaster.py +31 -20
  17. ml_tools/ML_datasetmaster/_datasetmaster.py +26 -9
  18. ml_tools/ML_datasetmaster/_sequence_datasetmaster.py +38 -23
  19. ml_tools/ML_evaluation/__init__.py +5 -1
  20. ml_tools/ML_evaluation_captum/__init__.py +5 -1
  21. ml_tools/ML_finalize_handler/__init__.py +5 -1
  22. ml_tools/ML_inference/__init__.py +5 -1
  23. ml_tools/ML_inference_sequence/__init__.py +5 -1
  24. ml_tools/ML_inference_vision/__init__.py +5 -1
  25. ml_tools/ML_models/__init__.py +21 -6
  26. ml_tools/ML_models/_dragon_autoint.py +302 -0
  27. ml_tools/ML_models/_dragon_gate.py +358 -0
  28. ml_tools/ML_models/_dragon_node.py +268 -0
  29. ml_tools/ML_models/_dragon_tabnet.py +255 -0
  30. ml_tools/ML_models_sequence/__init__.py +5 -1
  31. ml_tools/ML_models_vision/__init__.py +5 -1
  32. ml_tools/ML_optimization/__init__.py +11 -3
  33. ml_tools/ML_optimization/_multi_dragon.py +2 -2
  34. ml_tools/ML_optimization/_single_dragon.py +47 -67
  35. ml_tools/ML_optimization/_single_manual.py +1 -1
  36. ml_tools/ML_scaler/_ML_scaler.py +12 -7
  37. ml_tools/ML_scaler/__init__.py +5 -1
  38. ml_tools/ML_trainer/__init__.py +5 -1
  39. ml_tools/ML_trainer/_base_trainer.py +136 -13
  40. ml_tools/ML_trainer/_dragon_detection_trainer.py +31 -91
  41. ml_tools/ML_trainer/_dragon_sequence_trainer.py +24 -74
  42. ml_tools/ML_trainer/_dragon_trainer.py +24 -85
  43. ml_tools/ML_utilities/__init__.py +5 -1
  44. ml_tools/ML_utilities/_inspection.py +44 -30
  45. ml_tools/ML_vision_transformers/__init__.py +8 -2
  46. ml_tools/PSO_optimization/__init__.py +5 -1
  47. ml_tools/SQL/__init__.py +8 -2
  48. ml_tools/VIF/__init__.py +5 -1
  49. ml_tools/data_exploration/__init__.py +4 -1
  50. ml_tools/data_exploration/_cleaning.py +4 -2
  51. ml_tools/ensemble_evaluation/__init__.py +5 -1
  52. ml_tools/ensemble_inference/__init__.py +5 -1
  53. ml_tools/ensemble_learning/__init__.py +5 -1
  54. ml_tools/excel_handler/__init__.py +5 -1
  55. ml_tools/keys/__init__.py +5 -1
  56. ml_tools/math_utilities/__init__.py +5 -1
  57. ml_tools/optimization_tools/__init__.py +5 -1
  58. ml_tools/path_manager/__init__.py +8 -2
  59. ml_tools/plot_fonts/__init__.py +8 -2
  60. ml_tools/schema/__init__.py +8 -2
  61. ml_tools/schema/_feature_schema.py +3 -3
  62. ml_tools/serde/__init__.py +5 -1
  63. ml_tools/utilities/__init__.py +5 -1
  64. ml_tools/utilities/_utility_save_load.py +38 -20
  65. dragon_ml_toolbox-20.2.0.dist-info/RECORD +0 -179
  66. ml_tools/ETL_cleaning/_imprimir.py +0 -13
  67. ml_tools/ETL_engineering/_imprimir.py +0 -24
  68. ml_tools/GUI_tools/_imprimir.py +0 -12
  69. ml_tools/IO_tools/_imprimir.py +0 -14
  70. ml_tools/MICE/_imprimir.py +0 -11
  71. ml_tools/ML_callbacks/_imprimir.py +0 -12
  72. ml_tools/ML_chain/_imprimir.py +0 -12
  73. ml_tools/ML_configuration/_imprimir.py +0 -47
  74. ml_tools/ML_datasetmaster/_imprimir.py +0 -15
  75. ml_tools/ML_evaluation/_imprimir.py +0 -25
  76. ml_tools/ML_evaluation_captum/_imprimir.py +0 -10
  77. ml_tools/ML_finalize_handler/_imprimir.py +0 -8
  78. ml_tools/ML_inference/_imprimir.py +0 -11
  79. ml_tools/ML_inference_sequence/_imprimir.py +0 -8
  80. ml_tools/ML_inference_vision/_imprimir.py +0 -8
  81. ml_tools/ML_models/_advanced_models.py +0 -1086
  82. ml_tools/ML_models/_imprimir.py +0 -18
  83. ml_tools/ML_models_sequence/_imprimir.py +0 -8
  84. ml_tools/ML_models_vision/_imprimir.py +0 -16
  85. ml_tools/ML_optimization/_imprimir.py +0 -13
  86. ml_tools/ML_scaler/_imprimir.py +0 -8
  87. ml_tools/ML_trainer/_imprimir.py +0 -10
  88. ml_tools/ML_utilities/_imprimir.py +0 -16
  89. ml_tools/ML_vision_transformers/_imprimir.py +0 -14
  90. ml_tools/PSO_optimization/_imprimir.py +0 -10
  91. ml_tools/SQL/_imprimir.py +0 -8
  92. ml_tools/VIF/_imprimir.py +0 -10
  93. ml_tools/data_exploration/_imprimir.py +0 -32
  94. ml_tools/ensemble_evaluation/_imprimir.py +0 -14
  95. ml_tools/ensemble_inference/_imprimir.py +0 -9
  96. ml_tools/ensemble_learning/_imprimir.py +0 -10
  97. ml_tools/excel_handler/_imprimir.py +0 -13
  98. ml_tools/keys/_imprimir.py +0 -11
  99. ml_tools/math_utilities/_imprimir.py +0 -11
  100. ml_tools/optimization_tools/_imprimir.py +0 -13
  101. ml_tools/path_manager/_imprimir.py +0 -15
  102. ml_tools/plot_fonts/_imprimir.py +0 -8
  103. ml_tools/schema/_imprimir.py +0 -10
  104. ml_tools/serde/_imprimir.py +0 -10
  105. ml_tools/utilities/_imprimir.py +0 -18
  106. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/WHEEL +0 -0
  107. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/licenses/LICENSE +0 -0
  108. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
  109. {dragon_ml_toolbox-20.2.0.dist-info → dragon_ml_toolbox-20.3.0.dist-info}/top_level.txt +0 -0
ml_tools/ML_utilities/_inspection.py CHANGED
@@ -5,7 +5,7 @@ import torch
 from torch import nn
 
 from ..utilities import load_dataframe
-from ..IO_tools import save_list_strings, custom_logger
+from ..IO_tools import save_list_strings, save_json
 
 from ..path_manager import make_fullpath, list_subdirectories
 from .._core import get_logger
@@ -23,7 +23,7 @@ __all__ = [
 ]
 
 
-def get_model_parameters(model: nn.Module, save_dir: Optional[Union[str,Path]]=None) -> dict[str, int]:
+def get_model_parameters(model: nn.Module, save_dir: Optional[Union[str,Path]]=None, verbose: int = 3) -> dict[str, int]:
     """
     Calculates the total and trainable parameters of a PyTorch model.
 
@@ -46,18 +46,22 @@ def get_model_parameters(model: nn.Module, save_dir: Optional[Union[str,Path]]=N
 
     if save_dir is not None:
         output_dir = make_fullpath(save_dir, make=True, enforce="directory")
-        custom_logger(data=report,
-                      save_directory=output_dir,
-                      log_name=UtilityKeys.MODEL_PARAMS_FILE,
-                      add_timestamp=False,
-                      dict_as="json")
+
+        save_json(data=report,
+                  directory=output_dir,
+                  filename=UtilityKeys.MODEL_PARAMS_FILE,
+                  verbose=False)
+
+        if verbose >= 2:
+            _LOGGER.info(f"Model parameters report saved to '{output_dir.name}/{UtilityKeys.MODEL_PARAMS_FILE}.json'")
 
     return report
 
 
 def inspect_model_architecture(
     model: nn.Module,
-    save_dir: Union[str, Path]
+    save_dir: Union[str, Path],
+    verbose: int = 3
 ) -> None:
     """
     Saves a human-readable text summary of a model's instantiated
@@ -84,7 +88,8 @@ def inspect_model_architecture(
             f"{'='*80}\n\n"
         )
     except Exception as e:
-        _LOGGER.warning(f"Could not get model parameters: {e}")
+        if verbose >= 1:
+            _LOGGER.warning(f"Could not get model parameters: {e}")
         header = f"Model: {model.__class__.__name__}\n{'='*80}\n\n"
 
     # --- 3. Get architecture string ---
@@ -95,7 +100,8 @@ def inspect_model_architecture(
         with open(filepath, 'w', encoding='utf-8') as f:
             f.write(header)
             f.write(arch_string)
-        _LOGGER.info(f"Model architecture summary saved to '{filepath.name}'")
+        if verbose >= 2:
+            _LOGGER.info(f"Model architecture summary saved to '{filepath.name}'")
     except Exception as e:
         _LOGGER.error(f"Failed to write model architecture file: {e}")
         raise
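
The two hunks above replace the old custom_logger call with save_json and thread an integer verbose level through both helpers. Below is a minimal usage sketch of the resulting API, assuming get_model_parameters and inspect_model_architecture are re-exported from ml_tools.ML_utilities (the convention the package's __init__ files follow elsewhere in this diff); the thresholds are read directly from the hunks (>= 1 gates warnings, >= 2 gates save confirmations).

    from torch import nn
    from ml_tools.ML_utilities import get_model_parameters, inspect_model_architecture

    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))

    # Default-style call: the JSON report is written via save_json and the
    # save is confirmed through _LOGGER.info (gated at verbose >= 2).
    report = get_model_parameters(model, save_dir="reports", verbose=3)
    print(report)  # dict of total/trainable parameter counts

    # verbose=0: the files are still written; only the logging is suppressed.
    inspect_model_architecture(model, save_dir="reports", verbose=0)
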
@@ -104,6 +110,7 @@
 def inspect_pth_file(
     pth_path: Union[str, Path],
     save_dir: Union[str, Path],
+    verbose: int = 3
 ) -> None:
     """
     Inspects a .pth file (e.g., checkpoint) and saves a human-readable
@@ -166,12 +173,12 @@
         if PyTorchCheckpointKeys.MODEL_STATE in loaded_data and isinstance(loaded_data[PyTorchCheckpointKeys.MODEL_STATE], dict):
             report["notes"].append(f"Found standard checkpoint key: '{PyTorchCheckpointKeys.MODEL_STATE}'. Analyzing as model state_dict.")
             state_dict = loaded_data[PyTorchCheckpointKeys.MODEL_STATE]
-            report["model_state_analysis"] = _generate_weight_report(state_dict)
+            report["model_state_analysis"] = _generate_weight_report(state_dict, verbose=verbose)
 
         elif all(isinstance(v, torch.Tensor) for v in loaded_data.values()):
             report["notes"].append("File dictionary contains only tensors. Analyzing entire dictionary as model state_dict.")
             state_dict = loaded_data
-            report["model_state_analysis"] = _generate_weight_report(state_dict)
+            report["model_state_analysis"] = _generate_weight_report(state_dict, verbose=verbose)
 
         else:
             report["notes"].append("Could not identify a single model state_dict. See top_level_summary for all contents. No detailed weight analysis will be performed.")
@@ -181,22 +188,24 @@
         # _LOGGER.warning("Loading a full, pickled nn.Module is not recommended. Inspecting its state_dict().")
         report["notes"].append("File is a full, pickled nn.Module. This is not recommended. Extracting state_dict() for analysis.")
         state_dict = loaded_data.state_dict()
-        report["model_state_analysis"] = _generate_weight_report(state_dict)
+        report["model_state_analysis"] = _generate_weight_report(state_dict, verbose=verbose)
 
     else:
         # --- Case 3: Unrecognized format (e.g., single tensor, list) ---
         _LOGGER.error(f"Could not parse .pth file. Loaded data is of type {type(loaded_data)}, not a dict or nn.Module.")
         raise ValueError()
 
-    # --- 5. Save Report ---
-    custom_logger(data=report,
-                  save_directory=output_dir,
-                  log_name=UtilityKeys.PTH_FILE + pth_name,
-                  add_timestamp=False,
-                  dict_as="json")
+    # --- 5. Save Report ---
+    save_json(data=report,
+              directory=output_dir,
+              filename=UtilityKeys.PTH_FILE + pth_name,
+              verbose=False)
+
+    if verbose >= 2:
+        _LOGGER.info(f".pth file inspection report saved to '{output_dir.name}/{UtilityKeys.PTH_FILE + pth_name}.json'")
 
 
-def _generate_weight_report(state_dict: dict) -> dict:
+def _generate_weight_report(state_dict: dict, verbose: int = 3) -> dict:
     """
     Internal helper to analyze a state_dict and return a structured report.
 
@@ -209,12 +218,14 @@ def _generate_weight_report(state_dict: dict) -> dict:
     weight_report = {}
     total_params = 0
     if not isinstance(state_dict, dict):
-        _LOGGER.warning(f"Attempted to generate weight report on non-dict type: {type(state_dict)}")
+        if verbose >= 1:
+            _LOGGER.warning(f"Attempted to generate weight report on non-dict type: {type(state_dict)}")
         return {"error": "Input was not a dictionary."}
 
     for key, tensor in state_dict.items():
         if not isinstance(tensor, torch.Tensor):
-            _LOGGER.warning(f"Skipping key '{key}' in state_dict: value is not a tensor (type: {type(tensor)}).")
+            if verbose >= 1:
+                _LOGGER.warning(f"Skipping key '{key}' in state_dict: value is not a tensor (type: {type(tensor)}).")
             weight_report[key] = {
                 "type": str(type(tensor)),
                 "value_preview": str(tensor)[:50]  # Show a preview
@@ -239,7 +250,7 @@ def select_features_by_shap(
     root_directory: Union[str, Path],
     shap_threshold: float,
     log_feature_names_directory: Optional[Union[str, Path]],
-    verbose: bool = True) -> list[str]:
+    verbose: int = 3) -> list[str]:
     """
     Scans subdirectories to find SHAP summary CSVs, then extracts feature
     names whose mean absolute SHAP value meets a specified threshold.
@@ -261,7 +272,7 @@
         A single, sorted list of unique feature names that meet the
         threshold criteria across all found files.
     """
-    if verbose:
+    if verbose >= 2:
         _LOGGER.info(f"Starting feature selection with SHAP threshold >= {shap_threshold}")
     root_path = make_fullpath(root_directory, enforce="directory")
 
@@ -276,13 +287,14 @@
         if expected_path.is_file():
             valid_csv_paths.append(expected_path)
         else:
-            _LOGGER.warning(f"No '{shap_filename}' found in subdirectory '{dir_name}'.")
+            if verbose >= 1:
+                _LOGGER.warning(f"No '{shap_filename}' found in subdirectory '{dir_name}'.")
 
     if not valid_csv_paths:
         _LOGGER.error(f"Process halted: No '{shap_filename}' files were found in any subdirectory.")
         return []
 
-    if verbose:
+    if verbose >= 3:
         _LOGGER.info(f"Found {len(valid_csv_paths)} SHAP summary files to process.")
 
     # --- Step 3: Data Processing and Feature Extraction ---
@@ -294,7 +306,8 @@
             # Validate required columns
             required_cols = {SHAPKeys.FEATURE_COLUMN, SHAPKeys.SHAP_VALUE_COLUMN}
             if not required_cols.issubset(df.columns):
-                _LOGGER.warning(f"Skipping '{csv_path}': missing required columns.")
+                if verbose >= 1:
+                    _LOGGER.warning(f"Skipping '{csv_path}': missing required columns.")
                 continue
 
             # Filter by threshold and extract features
@@ -303,7 +316,8 @@
             master_feature_set.update(features)
 
         except (ValueError, pd.errors.EmptyDataError):
-            _LOGGER.warning(f"Skipping '{csv_path}' because it is empty or malformed.")
+            if verbose >= 1:
+                _LOGGER.warning(f"Skipping '{csv_path}' because it is empty or malformed.")
             continue
         except Exception as e:
             _LOGGER.error(f"An unexpected error occurred while processing '{csv_path}': {e}")
@@ -311,7 +325,7 @@
 
     # --- Step 4: Finalize and Return ---
     final_features = sorted(list(master_feature_set))
-    if verbose:
+    if verbose >= 2:
         _LOGGER.info(f"Selected {len(final_features)} unique features across all files.")
 
     if log_feature_names_directory is not None:
@@ -319,7 +333,7 @@
         save_list_strings(list_strings=final_features,
                           directory=save_names_path,
                           filename=DatasetKeys.FEATURE_NAMES,
-                          verbose=verbose)
+                          verbose=False)
 
     return final_features
 
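select_features_by_shap makes the same move from a boolean to a leveled verbose: per the hunks above, >= 1 keeps the per-file warnings, >= 2 the start and summary messages, >= 3 the file-count message, and the feature-name file is now always written silently (verbose=False is passed to save_list_strings). A usage sketch, assuming the function is re-exported from ml_tools.ML_utilities; the path and threshold are placeholders:

    from ml_tools.ML_utilities import select_features_by_shap

    features = select_features_by_shap(
        root_directory="shap_runs",           # placeholder directory of SHAP runs
        shap_threshold=0.01,                  # placeholder threshold
        log_feature_names_directory=None,     # skip writing the name list
        verbose=1,                            # keep warnings, drop the info chatter
    )
    print(f"{len(features)} features survived the threshold")
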
ml_tools/ML_vision_transformers/__init__.py CHANGED
@@ -5,9 +5,11 @@ from ._core_transforms import (
     RandomHistogramEqualization,
 )
 
-from ._offline_augmentation import create_offline_augmentations
+from ._offline_augmentation import (
+    create_offline_augmentations
+)
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -19,3 +21,7 @@ __all__ = [
     # Offline Augmentation
     "create_offline_augmentations",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
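
This hunk shows the release's most repeated change: every subpackage deletes its per-module _imprimir.py (removals 66 through 105 in the file list) and instead defines info() on top of one shared private helper, _imprimir_disponibles in ml_tools._core. The helper's body is not part of this diff, so the sketch below is an assumed minimal stand-in for what it plausibly does, namely print the names a subpackage exports:

    # Assumed stand-in for ml_tools._core._imprimir_disponibles; the real
    # implementation is private and not shown in this diff.
    def _imprimir_disponibles(available: list[str]) -> None:
        for name in available:
            print(name)

    # The pattern each __init__.py now repeats:
    __all__ = ["create_offline_augmentations"]

    def info():
        _imprimir_disponibles(__all__)

    info()  # prints the subpackage's public API

Centralizing the helper is also what shrinks the wheel's RECORD from 179 entries to 143 (items 2 and 65 in the file list).
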
ml_tools/PSO_optimization/__init__.py CHANGED
@@ -4,7 +4,7 @@ from ._PSO import (
     run_pso
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -12,3 +12,7 @@ __all__ = [
     "multiple_objective_functions_from_dir",
     "run_pso"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/SQL/__init__.py CHANGED
@@ -1,7 +1,13 @@
-from ._dragon_SQL import DragonSQL
+from ._dragon_SQL import (
+    DragonSQL
+)
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 __all__ = [
     "DragonSQL",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/VIF/__init__.py CHANGED
@@ -4,7 +4,7 @@ from ._VIF_factor import (
     compute_vif_multi,
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -12,3 +12,7 @@ __all__ = [
     "drop_vif_based",
     "compute_vif_multi"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/data_exploration/__init__.py CHANGED
@@ -38,7 +38,7 @@ from ._schema_ops import (
     apply_feature_schema,
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -68,3 +68,6 @@ __all__ = [
     "reconstruct_binary",
     "reconstruct_multibinary",
 ]
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/data_exploration/_cleaning.py CHANGED
@@ -221,7 +221,8 @@ def drop_macro(df: pd.DataFrame,
     )
     save_dataframe_filename(df=missing_data_start.reset_index(drop=False),
                             save_dir=full_path,
-                            filename="Missing_Data_Original")
+                            filename="Missing_Data_Original",
+                            verbose=2)
 
     # Clean cycles for rows and columns
     master = True
@@ -255,7 +256,8 @@
     )
     save_dataframe_filename(df=missing_data_final.reset_index(drop=False),
                             save_dir=full_path,
-                            filename="Missing_Data_Processed")
+                            filename="Missing_Data_Processed",
+                            verbose=2)
 
     # return cleaned dataframe
     return df_clean
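
Here drop_macro pins verbose=2 on its two missing-data report saves instead of relying on the default. A sketch of the underlying call, assuming save_dataframe_filename is importable from ml_tools.utilities (its likely home, ml_tools/utilities/_utility_save_load.py, is item 64 in the file list); the DataFrame and directory are placeholders:

    import pandas as pd
    from ml_tools.utilities import save_dataframe_filename  # assumed export

    missing = pd.DataFrame({"column": ["age", "dose"], "missing_pct": [12.5, 0.8]})

    # Mirrors the call inside drop_macro: verbose=2 keeps the save
    # confirmation without higher-level detail logging.
    save_dataframe_filename(df=missing,
                            save_dir="reports",   # placeholder directory
                            filename="Missing_Data_Original",
                            verbose=2)
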
ml_tools/ensemble_evaluation/__init__.py CHANGED
@@ -8,7 +8,7 @@ from ._ensemble_evaluation import (
     plot_learning_curves
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -20,3 +20,7 @@ __all__ = [
     "get_shap_values",
     "plot_learning_curves"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/ensemble_inference/__init__.py CHANGED
@@ -3,10 +3,14 @@ from ._ensemble_inference import (
     model_report
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
     "DragonEnsembleInferenceHandler",
     "model_report"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/ensemble_learning/__init__.py CHANGED
@@ -4,7 +4,7 @@ from ._ensemble_learning import (
     run_ensemble_pipeline,
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -12,3 +12,7 @@ __all__ = [
     "ClassificationTreeModels",
     "run_ensemble_pipeline",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/excel_handler/__init__.py CHANGED
@@ -7,7 +7,7 @@ from ._excel_handler import (
     horizontal_merge_transform_excel
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -18,3 +18,7 @@ __all__ = [
     "vertical_merge_transform_excel",
     "horizontal_merge_transform_excel"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/keys/__init__.py CHANGED
@@ -5,7 +5,7 @@ from ._keys import (
     _PublicTaskKeys as TaskKeys,
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -14,3 +14,7 @@ __all__ = [
     "FinalizedFileKeys",
     "TaskKeys",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/math_utilities/__init__.py CHANGED
@@ -6,7 +6,7 @@ from ._math_utilities import (
 )
 
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -15,3 +15,7 @@ __all__ = [
     "threshold_binary_values_batch",
     "discretize_categorical_values",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/optimization_tools/__init__.py CHANGED
@@ -10,7 +10,7 @@ from ._optimization_bounds import (
     parse_lower_upper_bounds,
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -21,3 +21,7 @@ __all__ = [
     "plot_optimal_feature_distributions",
     "plot_optimal_feature_distributions_from_dataframe",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/path_manager/__init__.py CHANGED
@@ -1,4 +1,6 @@
-from ._dragonmanager import DragonPathManager
+from ._dragonmanager import (
+    DragonPathManager
+)
 
 from ._path_tools import (
     make_fullpath,
@@ -10,7 +12,7 @@ from ._path_tools import (
     safe_move,
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -23,3 +25,7 @@ __all__ = [
     "clean_directory",
     "safe_move",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/plot_fonts/__init__.py CHANGED
@@ -1,8 +1,14 @@
-from ._plot_fonts import configure_cjk_fonts
+from ._plot_fonts import (
+    configure_cjk_fonts
+)
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
     "configure_cjk_fonts"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/schema/__init__.py CHANGED
@@ -1,11 +1,13 @@
-from ._feature_schema import FeatureSchema
+from ._feature_schema import (
+    FeatureSchema
+)
 
 from ._gui_schema import (
     create_guischema_template,
     make_multibinary_groups
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -13,3 +15,7 @@ __all__ = [
     "create_guischema_template",
     "make_multibinary_groups",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/schema/_feature_schema.py CHANGED
@@ -56,7 +56,7 @@ class FeatureSchema(NamedTuple):
             json.dump(data, f, indent=4)
 
             if verbose:
-                _LOGGER.info(f"FeatureSchema saved to '{dir_path.name}/{SchemaKeys.SCHEMA_FILENAME}'")
+                _LOGGER.info(f"{self} saved to '{dir_path.name}/{SchemaKeys.SCHEMA_FILENAME}'")
 
         except (IOError, TypeError) as e:
             _LOGGER.error(f"Failed to save FeatureSchema to JSON: {e}")
@@ -86,7 +86,7 @@ class FeatureSchema(NamedTuple):
         schema = cls(**schema_kwargs)
 
         if verbose:
-            _LOGGER.info(f"FeatureSchema loaded from '{dir_path.name}'")
+            _LOGGER.info(f"{schema} loaded from '{dir_path.name}/{SchemaKeys.SCHEMA_FILENAME}'")
 
         return schema
 
@@ -142,7 +142,7 @@ class FeatureSchema(NamedTuple):
         schema = cls(**schema_kwargs)
 
         if verbose:
-            _LOGGER.info(f"FeatureSchema extracted from architecture '{target_path.name}'")
+            _LOGGER.info(f"{schema} extracted from architecture '{target_path.parent.name}/{target_path.name}'")
 
         return schema
 
ml_tools/serde/__init__.py CHANGED
@@ -4,7 +4,7 @@ from ._serde import (
     deserialize_object,
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -12,3 +12,7 @@ __all__ = [
     "serialize_object",
     "deserialize_object",
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)
ml_tools/utilities/__init__.py CHANGED
@@ -15,7 +15,7 @@ from ._utility_tools import (
     train_dataset_yielder
 )
 
-from ._imprimir import info
+from .._core import _imprimir_disponibles
 
 
 __all__ = [
@@ -31,3 +31,7 @@ __all__ = [
     "train_dataset_orchestrator",
     "train_dataset_yielder"
 ]
+
+
+def info():
+    _imprimir_disponibles(__all__)