dragon-ml-toolbox 20.9.0__py3-none-any.whl → 20.11.0__py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dragon-ml-toolbox
- Version: 20.9.0
+ Version: 20.11.0
  Summary: Complete pipelines and helper tools for data science and machine learning projects.
  Author-email: Karl Luigi Loza Vidaurre <luigiloza@gmail.com>
  License-Expression: MIT
@@ -1,5 +1,5 @@
- dragon_ml_toolbox-20.9.0.dist-info/licenses/LICENSE,sha256=L35WDmmLZNTlJvxF6Vy7Uy4SYNi6rCfWUqlTHpoRMoU,1081
- dragon_ml_toolbox-20.9.0.dist-info/licenses/LICENSE-THIRD-PARTY.md,sha256=0-HBRMMgKuwtGy6nMJZvIn1fLxhx_ksyyVB2U_iyYZU,2818
+ dragon_ml_toolbox-20.11.0.dist-info/licenses/LICENSE,sha256=L35WDmmLZNTlJvxF6Vy7Uy4SYNi6rCfWUqlTHpoRMoU,1081
+ dragon_ml_toolbox-20.11.0.dist-info/licenses/LICENSE-THIRD-PARTY.md,sha256=0-HBRMMgKuwtGy6nMJZvIn1fLxhx_ksyyVB2U_iyYZU,2818
  ml_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ml_tools/constants.py,sha256=3br5Rk9cL2IUo638eJuMOGdbGQaWssaUecYEvSeRBLM,3322
  ml_tools/ETL_cleaning/__init__.py,sha256=gLRHF-qzwpqKTvbbn9chIQELeUDh_XGpBRX28j-5IqI,545
@@ -20,7 +20,7 @@ ml_tools/MICE/__init__.py,sha256=-IZv9V06U7BbB3ubu1vgbxtwFy0dV6E-9EDSg6-woio,385
  ml_tools/MICE/_dragon_mice.py,sha256=k82I3-f4aMuSW7LzTRnuBniEig7A9_vH-Oj7yWum6ss,17817
  ml_tools/ML_callbacks/__init__.py,sha256=xck_IdLFYCq6Lo2lQqbQd_nOeCDI8nfVyxBaBnQ-wcY,490
  ml_tools/ML_callbacks/_base.py,sha256=xLVAFOhBHjqnf8a_wKgW1F-tn2u6EqV3IHXsXKTn2NE,3269
- ml_tools/ML_callbacks/_checkpoint.py,sha256=Ioj9wn8XlsR_S1NnmWbyT9lkO8o2_DcHVMrFtxYJOes,9721
+ ml_tools/ML_callbacks/_checkpoint.py,sha256=hzFRCXJexS91lN428ztT85_db9DwaAwczODvdDMn9B4,9721
  ml_tools/ML_callbacks/_early_stop.py,sha256=qzTzxfDCDim0qj7QQ7ykJNIOBWbXtviDptMCczXXy_k,8073
  ml_tools/ML_callbacks/_scheduler.py,sha256=mn97_VH8Lp37KH3zSgmPemGQV8g-K8GfhRNHTftaNcg,7390
  ml_tools/ML_chain/__init__.py,sha256=aqSGAJnFYE_ZWbueNneg2z5welBsmGJ0XKi8Ebgw6Eg,554
@@ -46,7 +46,7 @@ ml_tools/ML_evaluation/_loss.py,sha256=1a4O25i3Ya_3naNZNL7ELLUL46BY86g1scA7d7q2U
  ml_tools/ML_evaluation/_regression.py,sha256=UZA7_fg85ZKJQWszioWDtmkplSiXeHJk2fBYR5bRXHY,11225
  ml_tools/ML_evaluation/_sequence.py,sha256=gUk9Uvmy7MrXkfrriMnfypkgJU5XERHdqekTa2gBaOM,8004
  ml_tools/ML_evaluation/_vision.py,sha256=abBHQ6Z2GunHNusL3wcLgfI1FVNA6hBUBTq1eOA8FSA,11489
- ml_tools/ML_evaluation_captum/_ML_evaluation_captum.py,sha256=6g3ymSxJGHXxwIN7WCD2Zi9zxKWEv-Qskd2cCGQQJ5Y,18439
+ ml_tools/ML_evaluation_captum/_ML_evaluation_captum.py,sha256=RMWkSmqHbb0Lj7W_uQJInEexOjXYmhMkcVHZT77wUrc,18847
  ml_tools/ML_evaluation_captum/__init__.py,sha256=DZDoZXexCI49JNl_tTmFfYW4hTUYK5QQLex01wMfhnk,333
  ml_tools/ML_finalize_handler/_ML_finalize_handler.py,sha256=g-vkHJDTGXZsKOUA-Yfg7EuA1SmaHjzesCPiAyRMg2k,7054
  ml_tools/ML_finalize_handler/__init__.py,sha256=VQyLbCQUcliAAFiOAsnPhyJ7UVYgbSqAbAnpqeOnRSg,198
@@ -83,7 +83,7 @@ ml_tools/ML_optimization/_single_manual.py,sha256=h-_k9JmRqPkjTra1nu7AyYbSyWkYZ1
  ml_tools/ML_scaler/_ML_scaler.py,sha256=P75X0Sx8N-VxC2Qy8aG7mWaZlkTfjspiZDi1YiMQD1I,8872
  ml_tools/ML_scaler/__init__.py,sha256=SHDNyLsoOLl2OtkIb3pGg-JRs3E2bYJBgnHwH3vw_Tk,172
  ml_tools/ML_trainer/__init__.py,sha256=42kueHa7Z0b_yLbywNCgIxlW6WmgLBqkTFwKH7vFLXw,379
- ml_tools/ML_trainer/_base_trainer.py,sha256=mflBw36SEN3pc8fOVqazrjwYk9n7Ey7dEhWgLfhD_Dw,17699
+ ml_tools/ML_trainer/_base_trainer.py,sha256=0ATm672NRsjJ6nv_NEl6-OEd9Bst1-s5OPxfG4qe8Lg,18075
  ml_tools/ML_trainer/_dragon_detection_trainer.py,sha256=B5F93PPnp2fYQmj1SYFRnAPVA39JwZUtJRMCdpSQF7k,16235
  ml_tools/ML_trainer/_dragon_sequence_trainer.py,sha256=Tj4YGgMrCkLnnNUlT_8wcdJFFcFhsdux308QPiqj-tw,23509
  ml_tools/ML_trainer/_dragon_trainer.py,sha256=bvSen_liut6B7gbg53MxOXKpJUkRaHtXDXW2SXBWPYQ,58553
@@ -119,7 +119,7 @@ ml_tools/ensemble_learning/_ensemble_learning.py,sha256=MHDZBR20_nStlSSeThFI3bSu
  ml_tools/excel_handler/__init__.py,sha256=AaWM3n_dqBhJLTs3OEA57ex5YykKXNOwVCyHlVsdnqI,530
  ml_tools/excel_handler/_excel_handler.py,sha256=TODudmeQgDSdxUKzLfAzizs--VL-g8WxDOfQ4sgxxLs,13965
  ml_tools/keys/__init__.py,sha256=-0c2pmrhyfROc-oQpEjJGLBMhSagA3CyFijQaaqZRqU,399
- ml_tools/keys/_keys.py,sha256=56hlyPl2VUMsq7cFFLBypWHr-JU6ehWGwZG38l6IjI0,9389
+ ml_tools/keys/_keys.py,sha256=Kr73o9SaH5Y3DT0z0H-1eLwlBplJmjisjoO_EoUNkAg,9388
  ml_tools/math_utilities/__init__.py,sha256=K7Obkkc4rPKj4EbRZf1BsXHfiCg7FXYv_aN9Yc2Z_Vg,400
  ml_tools/math_utilities/_math_utilities.py,sha256=BYHIVcM9tuKIhVrkgLLiM5QalJ39zx7dXYy_M9aGgiM,9012
  ml_tools/optimization_tools/__init__.py,sha256=KD8JXpfGuPndO4AHnjJGu6uV1GRwhOfboD0KZV45kzw,658
@@ -143,7 +143,7 @@ ml_tools/utilities/__init__.py,sha256=h4lE3SQstg-opcQj6QSKhu-HkqSbmHExsWoM9vC5D9
  ml_tools/utilities/_translate.py,sha256=U8hRPa3PmTpIf9n9yR3gBGmp_hkcsjQLwjAHSHc0WHs,10325
  ml_tools/utilities/_utility_save_load.py,sha256=EFvFaTaHahDQWdJWZr-j7cHqRbG_Xrpc96228JhV-bs,16773
  ml_tools/utilities/_utility_tools.py,sha256=bN0J9d1S0W5wNzNntBWqDsJcEAK7-1OgQg3X2fwXns0,6918
- dragon_ml_toolbox-20.9.0.dist-info/METADATA,sha256=ehKhp6BpCkHcZnWpcoZU53rn4T0yI0Dboq3eH2vx8LU,7888
- dragon_ml_toolbox-20.9.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dragon_ml_toolbox-20.9.0.dist-info/top_level.txt,sha256=wm-oxax3ciyez6VoO4zsFd-gSok2VipYXnbg3TH9PtU,9
- dragon_ml_toolbox-20.9.0.dist-info/RECORD,,
+ dragon_ml_toolbox-20.11.0.dist-info/METADATA,sha256=KiKepG9k7M1RbCXxEutcr7EkvDPaWIRiKoSvrTR1HSw,7889
+ dragon_ml_toolbox-20.11.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dragon_ml_toolbox-20.11.0.dist-info/top_level.txt,sha256=wm-oxax3ciyez6VoO4zsFd-gSok2VipYXnbg3TH9PtU,9
+ dragon_ml_toolbox-20.11.0.dist-info/RECORD,,
ml_tools/ML_callbacks/_checkpoint.py CHANGED
@@ -79,7 +79,7 @@ class DragonModelCheckpoint(_Callback):

  def on_train_begin(self, logs=None):
  """Reset file tracking state when training starts.
- NOTE: Do nOT reset self.best here if it differs from the default. This allows the Trainer to restore 'best' from a checkpoint before calling train()."""
+ NOTE: Do not reset self.best here if it differs from the default. This allows the Trainer to restore 'best' from a checkpoint before calling train()."""
  self.best_checkpoints = []
  self.recent_checkpoints = []

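The docstring above documents that on_train_begin() resets the file-tracking lists but deliberately leaves self.best untouched, so a 'best' score restored from a checkpoint survives a subsequent train() call. A minimal, self-contained sketch of that behaviour (the class below is an illustrative stand-in, not the library's real callback):

    # Simplified stand-in; attribute names mirror the diff, the class is illustrative only.
    class _CheckpointSketch:
        def __init__(self):
            self.best = float("inf")       # default before anything is restored
            self.best_checkpoints = []
            self.recent_checkpoints = []

        def on_train_begin(self, logs=None):
            # Reset file tracking only; self.best is intentionally left alone so a
            # value restored from a checkpoint before train() is not discarded.
            self.best_checkpoints = []
            self.recent_checkpoints = []

    cb = _CheckpointSketch()
    cb.best = 0.123                        # e.g. restored by the Trainer from a checkpoint
    cb.on_train_begin()
    assert cb.best == 0.123                # the restored 'best' survives training start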
ml_tools/ML_evaluation_captum/_ML_evaluation_captum.py CHANGED
@@ -126,7 +126,8 @@ def captum_feature_importance(model: nn.Module,
  feature_names=feature_names,
  save_dir=save_dir_path,
  n_steps=n_steps,
- file_suffix=f"_{clean_name}"
+ file_suffix=f"_{clean_name}",
+ target_name=name # Pass original name for plotting
  )


@@ -137,7 +138,8 @@ def _process_single_target(ig: 'IntegratedGradients', # type: ignore
  feature_names: Optional[list[str]],
  save_dir: Path,
  n_steps: int,
- file_suffix: str):
+ file_suffix: str,
+ target_name: str = ""):
  """
  Private helper to run the attribution, aggregation, and saving for a single context.
  """
@@ -214,7 +216,11 @@ def _process_single_target(ig: 'IntegratedGradients', # type: ignore
  plt.xlabel("Mean Absolute Attribution")

  title = "Feature Importance"
- if file_suffix:
+
+ # Use the original target name if provided, otherwise fallback to suffix logic
+ if target_name:
+ title += f" ({target_name})"
+ elif file_suffix:
  # Remove the leading underscore for the title
  clean_suffix = file_suffix.lstrip("_").replace("_", " ")
  title += f" ({clean_suffix})"
@@ -228,7 +234,9 @@ def _process_single_target(ig: 'IntegratedGradients', # type: ignore
  plt.savefig(plot_path)
  plt.close()

- _LOGGER.info(f"🔬 Captum explanation for target '{file_suffix.lstrip("_").replace("_", " ")}' saved to '{save_dir.name}'")
+ # Use target_name for logging if available, otherwise fallback to cleaning the suffix
+ log_name = target_name if target_name else file_suffix.lstrip("_").replace("_", " ")
+ _LOGGER.info(f"🔬 Captum explanation for target '{log_name}' saved to '{save_dir.name}'")


  def captum_image_heatmap(model: nn.Module,
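The changes above thread the original target name through to _process_single_target, so plot titles and log messages use it when available and fall back to a cleaned file_suffix otherwise. A minimal, self-contained sketch of just that fallback (the helper name _resolve_display_name and the example values are illustrative, not part of the package):

    # Hypothetical helper mirroring the added fallback logic.
    def _resolve_display_name(target_name: str, file_suffix: str) -> str:
        # Prefer the caller-supplied target name; otherwise clean the file suffix.
        return target_name if target_name else file_suffix.lstrip("_").replace("_", " ")

    print(_resolve_display_name("Yield Strength", "_yield_strength"))  # -> Yield Strength
    print(_resolve_display_name("", "_yield_strength"))                # -> yield strength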
ml_tools/ML_trainer/_base_trainer.py CHANGED
@@ -276,6 +276,16 @@ class _BaseDragonTrainer(ABC):
  except Exception as e:
  _LOGGER.error(f"Failed to load checkpoint from '{p}': {e}")
  raise
+
+ def load_checkpoint(self, path: Union[str, Path], verbose: int = 3):
+ """
+ Loads a specific checkpoint state into the model, optimizer, and scheduler.
+
+ Args:
+ path (str | Path): Path to the .pth checkpoint file.
+ verbose (int): Verbosity level for logging.
+ """
+ self._load_checkpoint(path=path, verbose=verbose)

  def fit(self,
  save_dir: Union[str,Path],
@@ -366,7 +376,7 @@ class _BaseDragonTrainer(ABC):
  self.device = self._validate_device(device)
  self.model.to(self.device)
  _LOGGER.info(f"Trainer and model moved to {self.device}.")
-
+
  def _load_model_state_wrapper(self, model_checkpoint: Union[Path, Literal['best', 'current']], verbose: int = 2):
  """
  Private helper to load the correct model state_dict based on user's choice.
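The new load_checkpoint() shown above is a thin public wrapper around the existing private _load_checkpoint(), letting callers restore model, optimizer, and scheduler state from a .pth file before calling fit(). A self-contained sketch of that delegation pattern, using a simplified stand-in class and an illustrative checkpoint path rather than the library's real trainer:

    from pathlib import Path
    from typing import Union

    class _TrainerSketch:
        def _load_checkpoint(self, path: Union[str, Path], verbose: int = 2):
            # The real trainer restores model/optimizer/scheduler state (and the
            # tracked 'best' score) from the .pth file; this stub only echoes the call.
            print(f"restoring state from {Path(path)} (verbose={verbose})")

        def load_checkpoint(self, path: Union[str, Path], verbose: int = 3):
            """Public entry point: load a specific checkpoint, e.g. before fit()."""
            self._load_checkpoint(path=path, verbose=verbose)

    _TrainerSketch().load_checkpoint("checkpoints/DragonCheckpoint.pth")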
ml_tools/keys/_keys.py CHANGED
@@ -113,7 +113,7 @@ class PyTorchCheckpointKeys:
  EPOCH = "epoch"
  BEST_SCORE = "best_score"
  HISTORY = "history"
- CHECKPOINT_NAME = "PyModelCheckpoint"
+ CHECKPOINT_NAME = "DragonCheckpoint"

  ### Finalized config
  # EPOCH
@@ -296,8 +296,8 @@ class _EvaluationConfig:
  DPI = 400
  LABEL_PADDING = 10
  # large sizes for SVG layout to accommodate large fonts
- REGRESSION_PLOT_SIZE = (12, 8)
- SEQUENCE_PLOT_SIZE = (12, 8)
+ REGRESSION_PLOT_SIZE = (10, 7)
+ SEQUENCE_PLOT_SIZE = (10, 7)
  CLASSIFICATION_PLOT_SIZE = (9, 9)
  # Loss plot
  LOSS_PLOT_SIZE = (18, 9)