dragon-ml-toolbox 19.13.0__py3-none-any.whl → 20.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (219) hide show
  1. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/METADATA +29 -46
  2. dragon_ml_toolbox-20.0.0.dist-info/RECORD +178 -0
  3. ml_tools/{ETL_cleaning.py → ETL_cleaning/__init__.py} +13 -5
  4. ml_tools/ETL_cleaning/_basic_clean.py +351 -0
  5. ml_tools/ETL_cleaning/_clean_tools.py +128 -0
  6. ml_tools/ETL_cleaning/_dragon_cleaner.py +245 -0
  7. ml_tools/ETL_cleaning/_imprimir.py +13 -0
  8. ml_tools/{ETL_engineering.py → ETL_engineering/__init__.py} +8 -4
  9. ml_tools/ETL_engineering/_dragon_engineering.py +261 -0
  10. ml_tools/ETL_engineering/_imprimir.py +24 -0
  11. ml_tools/{_core/_ETL_engineering.py → ETL_engineering/_transforms.py} +14 -267
  12. ml_tools/{_core → GUI_tools}/_GUI_tools.py +37 -40
  13. ml_tools/{GUI_tools.py → GUI_tools/__init__.py} +7 -5
  14. ml_tools/GUI_tools/_imprimir.py +12 -0
  15. ml_tools/IO_tools/_IO_loggers.py +235 -0
  16. ml_tools/IO_tools/_IO_save_load.py +151 -0
  17. ml_tools/IO_tools/_IO_utils.py +140 -0
  18. ml_tools/{IO_tools.py → IO_tools/__init__.py} +13 -5
  19. ml_tools/IO_tools/_imprimir.py +14 -0
  20. ml_tools/MICE/_MICE_imputation.py +132 -0
  21. ml_tools/{MICE_imputation.py → MICE/__init__.py} +6 -7
  22. ml_tools/{_core/_MICE_imputation.py → MICE/_dragon_mice.py} +243 -322
  23. ml_tools/MICE/_imprimir.py +11 -0
  24. ml_tools/{ML_callbacks.py → ML_callbacks/__init__.py} +12 -4
  25. ml_tools/ML_callbacks/_base.py +101 -0
  26. ml_tools/ML_callbacks/_checkpoint.py +232 -0
  27. ml_tools/ML_callbacks/_early_stop.py +208 -0
  28. ml_tools/ML_callbacks/_imprimir.py +12 -0
  29. ml_tools/ML_callbacks/_scheduler.py +197 -0
  30. ml_tools/{ML_chaining_utilities.py → ML_chain/__init__.py} +8 -3
  31. ml_tools/{_core/_ML_chaining_utilities.py → ML_chain/_chaining_tools.py} +5 -129
  32. ml_tools/ML_chain/_dragon_chain.py +140 -0
  33. ml_tools/ML_chain/_imprimir.py +11 -0
  34. ml_tools/ML_configuration/__init__.py +90 -0
  35. ml_tools/ML_configuration/_base_model_config.py +69 -0
  36. ml_tools/ML_configuration/_finalize.py +366 -0
  37. ml_tools/ML_configuration/_imprimir.py +47 -0
  38. ml_tools/ML_configuration/_metrics.py +593 -0
  39. ml_tools/ML_configuration/_models.py +206 -0
  40. ml_tools/ML_configuration/_training.py +124 -0
  41. ml_tools/ML_datasetmaster/__init__.py +28 -0
  42. ml_tools/ML_datasetmaster/_base_datasetmaster.py +337 -0
  43. ml_tools/{_core/_ML_datasetmaster.py → ML_datasetmaster/_datasetmaster.py} +9 -329
  44. ml_tools/ML_datasetmaster/_imprimir.py +15 -0
  45. ml_tools/{_core/_ML_sequence_datasetmaster.py → ML_datasetmaster/_sequence_datasetmaster.py} +13 -15
  46. ml_tools/{_core/_ML_vision_datasetmaster.py → ML_datasetmaster/_vision_datasetmaster.py} +63 -65
  47. ml_tools/ML_evaluation/__init__.py +53 -0
  48. ml_tools/ML_evaluation/_classification.py +629 -0
  49. ml_tools/ML_evaluation/_feature_importance.py +409 -0
  50. ml_tools/ML_evaluation/_imprimir.py +25 -0
  51. ml_tools/ML_evaluation/_loss.py +92 -0
  52. ml_tools/ML_evaluation/_regression.py +273 -0
  53. ml_tools/{_core/_ML_sequence_evaluation.py → ML_evaluation/_sequence.py} +8 -11
  54. ml_tools/{_core/_ML_vision_evaluation.py → ML_evaluation/_vision.py} +12 -17
  55. ml_tools/{_core → ML_evaluation_captum}/_ML_evaluation_captum.py +11 -38
  56. ml_tools/{ML_evaluation_captum.py → ML_evaluation_captum/__init__.py} +6 -4
  57. ml_tools/ML_evaluation_captum/_imprimir.py +10 -0
  58. ml_tools/{_core → ML_finalize_handler}/_ML_finalize_handler.py +3 -7
  59. ml_tools/ML_finalize_handler/__init__.py +10 -0
  60. ml_tools/ML_finalize_handler/_imprimir.py +8 -0
  61. ml_tools/ML_inference/__init__.py +22 -0
  62. ml_tools/ML_inference/_base_inference.py +166 -0
  63. ml_tools/{_core/_ML_chaining_inference.py → ML_inference/_chain_inference.py} +14 -17
  64. ml_tools/ML_inference/_dragon_inference.py +332 -0
  65. ml_tools/ML_inference/_imprimir.py +11 -0
  66. ml_tools/ML_inference/_multi_inference.py +180 -0
  67. ml_tools/ML_inference_sequence/__init__.py +10 -0
  68. ml_tools/ML_inference_sequence/_imprimir.py +8 -0
  69. ml_tools/{_core/_ML_sequence_inference.py → ML_inference_sequence/_sequence_inference.py} +11 -15
  70. ml_tools/ML_inference_vision/__init__.py +10 -0
  71. ml_tools/ML_inference_vision/_imprimir.py +8 -0
  72. ml_tools/{_core/_ML_vision_inference.py → ML_inference_vision/_vision_inference.py} +15 -19
  73. ml_tools/ML_models/__init__.py +32 -0
  74. ml_tools/{_core/_ML_models_advanced.py → ML_models/_advanced_models.py} +22 -18
  75. ml_tools/ML_models/_base_mlp_attention.py +198 -0
  76. ml_tools/{_core/_models_advanced_base.py → ML_models/_base_save_load.py} +73 -49
  77. ml_tools/ML_models/_dragon_tabular.py +248 -0
  78. ml_tools/ML_models/_imprimir.py +18 -0
  79. ml_tools/ML_models/_mlp_attention.py +134 -0
  80. ml_tools/{_core → ML_models}/_models_advanced_helpers.py +13 -13
  81. ml_tools/ML_models_sequence/__init__.py +10 -0
  82. ml_tools/ML_models_sequence/_imprimir.py +8 -0
  83. ml_tools/{_core/_ML_sequence_models.py → ML_models_sequence/_sequence_models.py} +5 -8
  84. ml_tools/ML_models_vision/__init__.py +29 -0
  85. ml_tools/ML_models_vision/_base_wrapper.py +254 -0
  86. ml_tools/ML_models_vision/_image_classification.py +182 -0
  87. ml_tools/ML_models_vision/_image_segmentation.py +108 -0
  88. ml_tools/ML_models_vision/_imprimir.py +16 -0
  89. ml_tools/ML_models_vision/_object_detection.py +135 -0
  90. ml_tools/ML_optimization/__init__.py +21 -0
  91. ml_tools/ML_optimization/_imprimir.py +13 -0
  92. ml_tools/{_core/_ML_optimization_pareto.py → ML_optimization/_multi_dragon.py} +18 -24
  93. ml_tools/ML_optimization/_single_dragon.py +203 -0
  94. ml_tools/{_core/_ML_optimization.py → ML_optimization/_single_manual.py} +75 -213
  95. ml_tools/{_core → ML_scaler}/_ML_scaler.py +8 -11
  96. ml_tools/ML_scaler/__init__.py +10 -0
  97. ml_tools/ML_scaler/_imprimir.py +8 -0
  98. ml_tools/ML_trainer/__init__.py +20 -0
  99. ml_tools/ML_trainer/_base_trainer.py +297 -0
  100. ml_tools/ML_trainer/_dragon_detection_trainer.py +402 -0
  101. ml_tools/ML_trainer/_dragon_sequence_trainer.py +540 -0
  102. ml_tools/ML_trainer/_dragon_trainer.py +1160 -0
  103. ml_tools/ML_trainer/_imprimir.py +10 -0
  104. ml_tools/{ML_utilities.py → ML_utilities/__init__.py} +14 -6
  105. ml_tools/ML_utilities/_artifact_finder.py +382 -0
  106. ml_tools/ML_utilities/_imprimir.py +16 -0
  107. ml_tools/ML_utilities/_inspection.py +325 -0
  108. ml_tools/ML_utilities/_train_tools.py +205 -0
  109. ml_tools/{ML_vision_transformers.py → ML_vision_transformers/__init__.py} +9 -6
  110. ml_tools/{_core/_ML_vision_transformers.py → ML_vision_transformers/_core_transforms.py} +11 -155
  111. ml_tools/ML_vision_transformers/_imprimir.py +14 -0
  112. ml_tools/ML_vision_transformers/_offline_augmentation.py +159 -0
  113. ml_tools/{_core/_PSO_optimization.py → PSO_optimization/_PSO.py} +58 -15
  114. ml_tools/{PSO_optimization.py → PSO_optimization/__init__.py} +5 -3
  115. ml_tools/PSO_optimization/_imprimir.py +10 -0
  116. ml_tools/SQL/__init__.py +7 -0
  117. ml_tools/{_core/_SQL.py → SQL/_dragon_SQL.py} +7 -11
  118. ml_tools/SQL/_imprimir.py +8 -0
  119. ml_tools/{_core → VIF}/_VIF_factor.py +5 -8
  120. ml_tools/{VIF_factor.py → VIF/__init__.py} +4 -2
  121. ml_tools/VIF/_imprimir.py +10 -0
  122. ml_tools/_core/__init__.py +7 -1
  123. ml_tools/_core/_logger.py +8 -18
  124. ml_tools/_core/_schema_load_ops.py +43 -0
  125. ml_tools/_core/_script_info.py +2 -2
  126. ml_tools/{data_exploration.py → data_exploration/__init__.py} +32 -16
  127. ml_tools/data_exploration/_analysis.py +214 -0
  128. ml_tools/data_exploration/_cleaning.py +566 -0
  129. ml_tools/data_exploration/_features.py +583 -0
  130. ml_tools/data_exploration/_imprimir.py +32 -0
  131. ml_tools/data_exploration/_plotting.py +487 -0
  132. ml_tools/data_exploration/_schema_ops.py +176 -0
  133. ml_tools/{ensemble_evaluation.py → ensemble_evaluation/__init__.py} +6 -4
  134. ml_tools/{_core → ensemble_evaluation}/_ensemble_evaluation.py +3 -7
  135. ml_tools/ensemble_evaluation/_imprimir.py +14 -0
  136. ml_tools/{ensemble_inference.py → ensemble_inference/__init__.py} +5 -3
  137. ml_tools/{_core → ensemble_inference}/_ensemble_inference.py +15 -18
  138. ml_tools/ensemble_inference/_imprimir.py +9 -0
  139. ml_tools/{ensemble_learning.py → ensemble_learning/__init__.py} +4 -6
  140. ml_tools/{_core → ensemble_learning}/_ensemble_learning.py +7 -10
  141. ml_tools/ensemble_learning/_imprimir.py +10 -0
  142. ml_tools/{excel_handler.py → excel_handler/__init__.py} +5 -3
  143. ml_tools/{_core → excel_handler}/_excel_handler.py +6 -10
  144. ml_tools/excel_handler/_imprimir.py +13 -0
  145. ml_tools/{keys.py → keys/__init__.py} +4 -1
  146. ml_tools/keys/_imprimir.py +11 -0
  147. ml_tools/{_core → keys}/_keys.py +2 -0
  148. ml_tools/{math_utilities.py → math_utilities/__init__.py} +5 -2
  149. ml_tools/math_utilities/_imprimir.py +11 -0
  150. ml_tools/{_core → math_utilities}/_math_utilities.py +1 -5
  151. ml_tools/{optimization_tools.py → optimization_tools/__init__.py} +9 -4
  152. ml_tools/optimization_tools/_imprimir.py +13 -0
  153. ml_tools/optimization_tools/_optimization_bounds.py +236 -0
  154. ml_tools/optimization_tools/_optimization_plots.py +218 -0
  155. ml_tools/{path_manager.py → path_manager/__init__.py} +6 -3
  156. ml_tools/{_core/_path_manager.py → path_manager/_dragonmanager.py} +11 -347
  157. ml_tools/path_manager/_imprimir.py +15 -0
  158. ml_tools/path_manager/_path_tools.py +346 -0
  159. ml_tools/plot_fonts/__init__.py +8 -0
  160. ml_tools/plot_fonts/_imprimir.py +8 -0
  161. ml_tools/{_core → plot_fonts}/_plot_fonts.py +2 -5
  162. ml_tools/schema/__init__.py +15 -0
  163. ml_tools/schema/_feature_schema.py +223 -0
  164. ml_tools/schema/_gui_schema.py +191 -0
  165. ml_tools/schema/_imprimir.py +10 -0
  166. ml_tools/{serde.py → serde/__init__.py} +4 -2
  167. ml_tools/serde/_imprimir.py +10 -0
  168. ml_tools/{_core → serde}/_serde.py +3 -8
  169. ml_tools/{utilities.py → utilities/__init__.py} +11 -6
  170. ml_tools/utilities/_imprimir.py +18 -0
  171. ml_tools/{_core/_utilities.py → utilities/_utility_save_load.py} +13 -190
  172. ml_tools/utilities/_utility_tools.py +192 -0
  173. dragon_ml_toolbox-19.13.0.dist-info/RECORD +0 -111
  174. ml_tools/ML_chaining_inference.py +0 -8
  175. ml_tools/ML_configuration.py +0 -86
  176. ml_tools/ML_configuration_pytab.py +0 -14
  177. ml_tools/ML_datasetmaster.py +0 -10
  178. ml_tools/ML_evaluation.py +0 -16
  179. ml_tools/ML_evaluation_multi.py +0 -12
  180. ml_tools/ML_finalize_handler.py +0 -8
  181. ml_tools/ML_inference.py +0 -12
  182. ml_tools/ML_models.py +0 -14
  183. ml_tools/ML_models_advanced.py +0 -14
  184. ml_tools/ML_models_pytab.py +0 -14
  185. ml_tools/ML_optimization.py +0 -14
  186. ml_tools/ML_optimization_pareto.py +0 -8
  187. ml_tools/ML_scaler.py +0 -8
  188. ml_tools/ML_sequence_datasetmaster.py +0 -8
  189. ml_tools/ML_sequence_evaluation.py +0 -10
  190. ml_tools/ML_sequence_inference.py +0 -8
  191. ml_tools/ML_sequence_models.py +0 -8
  192. ml_tools/ML_trainer.py +0 -12
  193. ml_tools/ML_vision_datasetmaster.py +0 -12
  194. ml_tools/ML_vision_evaluation.py +0 -10
  195. ml_tools/ML_vision_inference.py +0 -8
  196. ml_tools/ML_vision_models.py +0 -18
  197. ml_tools/SQL.py +0 -8
  198. ml_tools/_core/_ETL_cleaning.py +0 -694
  199. ml_tools/_core/_IO_tools.py +0 -498
  200. ml_tools/_core/_ML_callbacks.py +0 -702
  201. ml_tools/_core/_ML_configuration.py +0 -1332
  202. ml_tools/_core/_ML_configuration_pytab.py +0 -102
  203. ml_tools/_core/_ML_evaluation.py +0 -867
  204. ml_tools/_core/_ML_evaluation_multi.py +0 -544
  205. ml_tools/_core/_ML_inference.py +0 -646
  206. ml_tools/_core/_ML_models.py +0 -668
  207. ml_tools/_core/_ML_models_pytab.py +0 -693
  208. ml_tools/_core/_ML_trainer.py +0 -2323
  209. ml_tools/_core/_ML_utilities.py +0 -886
  210. ml_tools/_core/_ML_vision_models.py +0 -644
  211. ml_tools/_core/_data_exploration.py +0 -1901
  212. ml_tools/_core/_optimization_tools.py +0 -493
  213. ml_tools/_core/_schema.py +0 -359
  214. ml_tools/plot_fonts.py +0 -8
  215. ml_tools/schema.py +0 -12
  216. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/WHEEL +0 -0
  217. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/licenses/LICENSE +0 -0
  218. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
  219. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,180 @@
1
+ import torch
2
+ import numpy as np
3
+ from typing import Union, Literal, Any
4
+
5
+ from .._core import get_logger
6
+ from ..keys._keys import PyTorchInferenceKeys, MLTaskKeys
7
+
8
+ from ._dragon_inference import DragonInferenceHandler
9
+
10
+
11
+ _LOGGER = get_logger("Multi Inference")
12
+
13
+
14
+ __all__ = [
15
+ "multi_inference_regression",
16
+ "multi_inference_classification"
17
+ ]
18
+
19
+
20
def multi_inference_regression(handlers: list[DragonInferenceHandler],
                               feature_vector: Union[np.ndarray, torch.Tensor],
                               output: Literal["numpy","torch"]="numpy") -> dict[str,Any]:
    """
    Runs several single-target regression models over one shared input.

    Each DragonInferenceHandler in `handlers` is applied to the same
    `feature_vector`, and its prediction is stored under the handler's first
    target ID.

    Behavior depends on the input rank:
    - 1D input: each target ID maps to a single value.
    - 2D input: each target ID maps to the full batch of values.

    Args:
        handlers (list[DragonInferenceHandler]): Initialized handlers, each with
            `target_ids` set and `task` equal to single-target regression.
        feature_vector (Union[np.ndarray, torch.Tensor]): One sample (1D) or a
            batch of samples (2D) fed to every model.
        output (Literal["numpy", "torch"], optional): Result format.
            - "numpy": Python scalars / NumPy arrays.
            - "torch": PyTorch tensors on the model's device.

    Returns:
        (dict[str, Any]): Mapping of each handler's first target ID to its
        predicted regression value(s).

    Raises:
        AttributeError: If any handler has no `target_ids` set.
        ValueError: If any handler's task is not regression, or if
            `feature_vector` is neither 1D nor 2D.
    """
    # Remember the original rank so single-sample results can be unwrapped later.
    single = feature_vector.ndim == 1

    # Promote a lone sample to a batch of one so both paths share the same code.
    if single:
        feature_vector = feature_vector.reshape(1, -1)

    # Anything other than 2D at this point (e.g. a 3D tensor) is invalid.
    if feature_vector.ndim != 2:
        _LOGGER.error("Input feature_vector must be a 1D or 2D array/tensor.")
        raise ValueError()

    predictions: dict[str, Any] = {}
    for handler in handlers:
        # Per-handler validation happens just before its inference call,
        # preserving the original "validate then predict" interleaving.
        if handler.target_ids is None:
            _LOGGER.error("All inference handlers must have a 'target_ids' attribute.")
            raise AttributeError()
        if handler.task != MLTaskKeys.REGRESSION:
            _LOGGER.error(f"Invalid task type: The handler for target_id '{handler.target_ids[0]}' is for '{handler.task}', only single target regression tasks are supported.")
            raise ValueError()

        key = handler.target_ids[0]
        if output == "numpy":
            # NumPy path: arrays on CPU; a batch-of-one collapses to a scalar.
            batch_out = handler.predict_batch_numpy(feature_vector)[PyTorchInferenceKeys.PREDICTIONS]
            predictions[key] = batch_out.item() if single else batch_out
        else:  # output == "torch"
            # Torch path: tensors on the model's device; batch-of-one yields the 0-dim tensor.
            batch_out = handler.predict_batch(feature_vector)[PyTorchInferenceKeys.PREDICTIONS]
            predictions[key] = batch_out[0] if single else batch_out

    return predictions
95
+
96
+
97
def multi_inference_classification(
                        handlers: list[DragonInferenceHandler],
                        feature_vector: Union[np.ndarray, torch.Tensor],
                        output: Literal["numpy","torch"]="numpy"
                        ) -> tuple[dict[str, Any], dict[str, Any]]:
    """
    Performs classification inference on a single sample or a batch.

    This function iterates through a list of DragonInferenceHandler objects,
    each configured for a different classification target. It returns two
    dictionaries: one for the predicted labels and one for the probabilities.

    The function adapts its behavior based on the input dimensions:
    - 1D input: The dictionaries map target ID to a single label and a single probability array.
    - 2D input: The dictionaries map target ID to an array of labels and an array of probability arrays.

    Args:
        handlers (list[DragonInferenceHandler]): A list of initialized inference handlers. Each must have `target_ids` set and be configured
            for binary or multiclass classification.
        feature_vector (Union[np.ndarray, torch.Tensor]): An input sample (1D)
            or a batch of samples (2D) for prediction.
        output (Literal["numpy", "torch"], optional): The desired format for the
            output predictions.

    Returns:
        (tuple[dict[str, Any], dict[str, Any]]): A tuple containing two dictionaries:
            1. A dictionary mapping each handler's first target ID to the predicted label(s).
            2. A dictionary mapping each handler's first target ID to the prediction probabilities.

    Raises:
        AttributeError: If any handler in the list is missing `target_ids`.
        ValueError: If any handler's `task` is not binary/multiclass classification or if the input `feature_vector` is not 1D or 2D.
    """
    # Store if the original input was a single sample
    is_single_sample = feature_vector.ndim == 1

    # Reshape a 1D vector to a 2D batch of one for uniform processing
    if is_single_sample:
        feature_vector = feature_vector.reshape(1, -1)

    if feature_vector.ndim != 2:
        _LOGGER.error("Input feature_vector must be a 1D or 2D array/tensor.")
        raise ValueError()

    # Initialize two dictionaries for results
    labels_results: dict[str, Any] = dict()
    probs_results: dict[str, Any] = dict()

    for handler in handlers:
        # Validation. The attribute is 'target_ids'; the previous message
        # referred to a nonexistent 'target_id' attribute.
        if handler.target_ids is None:
            _LOGGER.error("All inference handlers must have a 'target_ids' attribute.")
            raise AttributeError()
        if handler.task not in [MLTaskKeys.BINARY_CLASSIFICATION, MLTaskKeys.MULTICLASS_CLASSIFICATION]:
            _LOGGER.error(f"Invalid task type: The handler for target_id '{handler.target_ids[0]}' is for '{handler.task}', but this function only supports binary and multiclass classification.")
            raise ValueError()

        # Inference: both paths return a dict keyed by PyTorchInferenceKeys.
        if output == "numpy":
            # predict_batch_numpy returns a dict of NumPy arrays
            result = handler.predict_batch_numpy(feature_vector)
        else:  # torch
            # predict_batch returns a dict of Torch tensors
            result = handler.predict_batch(feature_vector)

        labels = result[PyTorchInferenceKeys.LABELS]
        probabilities = result[PyTorchInferenceKeys.PROBABILITIES]

        if is_single_sample:
            # For "numpy", convert the single label to a Python int scalar.
            # For "torch", get the 0-dim tensor label.
            if output == "numpy":
                labels_results[handler.target_ids[0]] = labels.item()
            else:  # torch
                labels_results[handler.target_ids[0]] = labels[0]

            # The probabilities are an array/tensor of values
            probs_results[handler.target_ids[0]] = probabilities[0]
        else:
            labels_results[handler.target_ids[0]] = labels
            probs_results[handler.target_ids[0]] = probabilities

    return labels_results, probs_results
180
+
@@ -0,0 +1,10 @@
1
+ from ._sequence_inference import (
2
+ DragonSequenceInferenceHandler
3
+ )
4
+
5
+ from ._imprimir import info
6
+
7
+
8
+ __all__ = [
9
+ "DragonSequenceInferenceHandler"
10
+ ]
@@ -0,0 +1,8 @@
1
from .._core import _imprimir_disponibles

# Public names of this subpackage, listed by info().
_GRUPOS = [
    "DragonSequenceInferenceHandler"
]

def info() -> None:
    """Print the objects available in this subpackage."""
    _imprimir_disponibles(_GRUPOS)
@@ -2,19 +2,18 @@ import torch
2
2
  from torch import nn
3
3
  import numpy as np
4
4
  from pathlib import Path
5
- from typing import Union, Literal, Dict, Any, Optional
5
+ from typing import Union, Literal, Any, Optional
6
6
  import matplotlib.pyplot as plt
7
7
  import seaborn as sns
8
8
 
9
- from ._ML_scaler import DragonScaler
10
- from ._script_info import _script_info
11
- from ._logger import get_logger
12
- from ._path_manager import make_fullpath, sanitize_filename
13
- from ._keys import PyTorchInferenceKeys, MLTaskKeys, PyTorchCheckpointKeys
14
- from ._ML_inference import _BaseInferenceHandler
9
+ from .._core import get_logger
10
+ from ..path_manager import make_fullpath, sanitize_filename
11
+ from ..keys._keys import PyTorchInferenceKeys, MLTaskKeys, PyTorchCheckpointKeys
15
12
 
13
+ from ..ML_inference._base_inference import _BaseInferenceHandler
16
14
 
17
- _LOGGER = get_logger("Inference Handler")
15
+
16
+ _LOGGER = get_logger("DragonSequenceInference")
18
17
 
19
18
 
20
19
  __all__ = [
@@ -109,7 +108,7 @@ class DragonSequenceInferenceHandler(_BaseInferenceHandler):
109
108
 
110
109
  return scaled_features.to(self.device)
111
110
 
112
- def predict_batch(self, features: Union[np.ndarray, torch.Tensor]) -> Dict[str, torch.Tensor]:
111
+ def predict_batch(self, features: Union[np.ndarray, torch.Tensor]) -> dict[str, torch.Tensor]:
113
112
  """
114
113
  Core batch prediction method for sequences.
115
114
  Runs a batch of sequences through the model, de-scales the output,
@@ -161,7 +160,7 @@ class DragonSequenceInferenceHandler(_BaseInferenceHandler):
161
160
 
162
161
  return {PyTorchInferenceKeys.PREDICTIONS: descaled_output}
163
162
 
164
- def predict(self, features: Union[np.ndarray, torch.Tensor]) -> Dict[str, torch.Tensor]:
163
+ def predict(self, features: Union[np.ndarray, torch.Tensor]) -> dict[str, torch.Tensor]:
165
164
  """
166
165
  Core single-sample prediction method for sequences.
167
166
  Runs a single sequence through the model, de-scales the output,
@@ -187,7 +186,7 @@ class DragonSequenceInferenceHandler(_BaseInferenceHandler):
187
186
 
188
187
  # --- NumPy Convenience Wrappers (on CPU) ---
189
188
 
190
- def predict_batch_numpy(self, features: Union[np.ndarray, torch.Tensor]) -> Dict[str, np.ndarray]:
189
+ def predict_batch_numpy(self, features: Union[np.ndarray, torch.Tensor]) -> dict[str, np.ndarray]:
191
190
  """
192
191
  Convenience wrapper for predict_batch that returns NumPy arrays.
193
192
 
@@ -202,7 +201,7 @@ class DragonSequenceInferenceHandler(_BaseInferenceHandler):
202
201
  numpy_results = {key: value.cpu().numpy() for key, value in tensor_results.items()}
203
202
  return numpy_results
204
203
 
205
- def predict_numpy(self, features: Union[np.ndarray, torch.Tensor]) -> Dict[str, Any]:
204
+ def predict_numpy(self, features: Union[np.ndarray, torch.Tensor]) -> dict[str, Any]:
206
205
  """
207
206
  Convenience wrapper for predict that returns NumPy arrays or scalars.
208
207
 
@@ -371,6 +370,3 @@ class DragonSequenceInferenceHandler(_BaseInferenceHandler):
371
370
  finally:
372
371
  plt.close()
373
372
 
374
-
375
- def info():
376
- _script_info(__all__)
@@ -0,0 +1,10 @@
1
+ from ._vision_inference import (
2
+ DragonVisionInferenceHandler
3
+ )
4
+
5
+ from ._imprimir import info
6
+
7
+
8
+ __all__ = [
9
+ "DragonVisionInferenceHandler",
10
+ ]
@@ -0,0 +1,8 @@
1
from .._core import _imprimir_disponibles

# Public names of this subpackage, listed by info().
_GRUPOS = [
    "DragonVisionInferenceHandler"
]

def info() -> None:
    """Print the objects available in this subpackage."""
    _imprimir_disponibles(_GRUPOS)
@@ -1,19 +1,18 @@
1
1
  import torch
2
2
  from torch import nn
3
- import numpy as np #numpy array return value
4
3
  from pathlib import Path
5
- from typing import Union, Literal, Dict, Any, List, Optional, Callable
4
+ from typing import Union, Literal, Any, Optional, Callable
6
5
  from PIL import Image
7
- from torchvision import transforms
8
6
 
9
- from ._script_info import _script_info
10
- from ._logger import get_logger
11
- from ._keys import PyTorchInferenceKeys, MLTaskKeys
12
- from ._ML_vision_transformers import _load_recipe_and_build_transform
13
- from ._ML_inference import _BaseInferenceHandler
7
+ from ..ML_vision_transformers._core_transforms import _load_recipe_and_build_transform
14
8
 
9
+ from .._core import get_logger
10
+ from ..keys._keys import PyTorchInferenceKeys, MLTaskKeys
15
11
 
16
- _LOGGER = get_logger("Inference Handler")
12
+ from ..ML_inference._base_inference import _BaseInferenceHandler
13
+
14
+
15
+ _LOGGER = get_logger("DragonVisionInference")
17
16
 
18
17
 
19
18
  __all__ = [
@@ -84,7 +83,7 @@ class DragonVisionInferenceHandler(_BaseInferenceHandler):
84
83
  self.set_transform(transform_source)
85
84
  self._is_transformed = True
86
85
 
87
- def _preprocess_batch(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Union[torch.Tensor, List[torch.Tensor]]:
86
+ def _preprocess_batch(self, inputs: Union[torch.Tensor, list[torch.Tensor]]) -> Union[torch.Tensor, list[torch.Tensor]]:
88
87
  """
89
88
  Validates input and moves it to the correct device.
90
89
  - For Classification/Segmentation: Expects 4D Tensor (B, C, H, W).
@@ -135,7 +134,7 @@ class DragonVisionInferenceHandler(_BaseInferenceHandler):
135
134
  _LOGGER.error(f"Invalid transform_source type: {type(transform_source)}. Must be str, Path, or Callable.")
136
135
  raise TypeError("transform_source must be a file path or a Callable.")
137
136
 
138
- def predict_batch(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[str, Any]:
137
+ def predict_batch(self, inputs: Union[torch.Tensor, list[torch.Tensor]]) -> dict[str, Any]:
139
138
  """
140
139
  Core batch prediction method for vision models.
141
140
  All preprocessing (resizing, normalization) should be done *before* calling this method.
@@ -207,7 +206,7 @@ class DragonVisionInferenceHandler(_BaseInferenceHandler):
207
206
  # This should be unreachable due to validation
208
207
  raise ValueError(f"Unknown task: {self.task}")
209
208
 
210
- def predict(self, single_input: torch.Tensor) -> Dict[str, Any]:
209
+ def predict(self, single_input: torch.Tensor) -> dict[str, Any]:
211
210
  """
212
211
  Core single-sample prediction method for vision models.
213
212
  All preprocessing (resizing, normalization) should be done *before*
@@ -248,7 +247,7 @@ class DragonVisionInferenceHandler(_BaseInferenceHandler):
248
247
 
249
248
  # --- NumPy Convenience Wrappers (on CPU) ---
250
249
 
251
- def predict_batch_numpy(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[str, Any]:
250
+ def predict_batch_numpy(self, inputs: Union[torch.Tensor, list[torch.Tensor]]) -> dict[str, Any]:
252
251
  """
253
252
  Convenience wrapper for predict_batch that returns NumPy arrays. With Labels if set.
254
253
 
@@ -296,7 +295,7 @@ class DragonVisionInferenceHandler(_BaseInferenceHandler):
296
295
 
297
296
  return numpy_results
298
297
 
299
- def predict_numpy(self, single_input: torch.Tensor) -> Dict[str, Any]:
298
+ def predict_numpy(self, single_input: torch.Tensor) -> dict[str, Any]:
300
299
  """
301
300
  Convenience wrapper for predict that returns NumPy arrays/scalars.
302
301
 
@@ -345,7 +344,7 @@ class DragonVisionInferenceHandler(_BaseInferenceHandler):
345
344
  PyTorchInferenceKeys.PROBABILITIES: tensor_results[PyTorchInferenceKeys.PROBABILITIES].cpu().numpy()
346
345
  }
347
346
 
348
- def predict_from_pil(self, image: Image.Image) -> Dict[str, Any]:
347
+ def predict_from_pil(self, image: Image.Image) -> dict[str, Any]:
349
348
  """
350
349
  Applies the stored transform to a single PIL image and returns the prediction.
351
350
 
@@ -383,7 +382,7 @@ class DragonVisionInferenceHandler(_BaseInferenceHandler):
383
382
  # Use the existing single-item predict method
384
383
  return self.predict_numpy(transformed_image)
385
384
 
386
- def predict_from_file(self, image_path: Union[str, Path]) -> Dict[str, Any]:
385
+ def predict_from_file(self, image_path: Union[str, Path]) -> dict[str, Any]:
387
386
  """
388
387
  Loads a single image from a file, applies the stored transform, and returns the prediction.
389
388
 
@@ -416,6 +415,3 @@ class DragonVisionInferenceHandler(_BaseInferenceHandler):
416
415
  # Call the PIL-based prediction method
417
416
  return self.predict_from_pil(image)
418
417
 
419
-
420
- def info():
421
- _script_info(__all__)
@@ -0,0 +1,32 @@
1
+ from ._mlp_attention import (
2
+ DragonMLP,
3
+ DragonAttentionMLP,
4
+ DragonMultiHeadAttentionNet
5
+ )
6
+
7
+ from ._advanced_models import (
8
+ DragonGateModel,
9
+ DragonNodeModel,
10
+ DragonAutoInt,
11
+ DragonTabNet
12
+ )
13
+
14
+ from ._dragon_tabular import DragonTabularTransformer
15
+
16
+ from ._imprimir import info
17
+
18
+
19
+ __all__ = [
20
+ # MLP and Attention Models
21
+ "DragonMLP",
22
+ "DragonAttentionMLP",
23
+ "DragonMultiHeadAttentionNet",
24
+ # Tabular Transformer Model
25
+ "DragonTabularTransformer",
26
+ # Advanced Models
27
+ "DragonGateModel",
28
+ "DragonNodeModel",
29
+ "DragonAutoInt",
30
+ "DragonTabNet",
31
+ ]
32
+
@@ -1,12 +1,13 @@
1
1
  import torch
2
2
  import torch.nn as nn
3
3
  import torch.nn.functional as F
4
- from typing import Dict, Any, Optional, Literal
4
+ from typing import Any, Optional, Literal
5
5
 
6
- from ._models_advanced_base import _ArchitectureBuilder
7
- from ._schema import FeatureSchema
8
- from ._logger import get_logger
9
- from ._script_info import _script_info
6
+ from ..schema import FeatureSchema
7
+ from .._core import get_logger
8
+ from ..keys._keys import SchemaKeys
9
+
10
+ from ._base_save_load import _ArchitectureBuilder
10
11
  from ._models_advanced_helpers import (
11
12
  Embedding1dLayer,
12
13
  GatedFeatureLearningUnit,
@@ -335,16 +336,16 @@ class DragonGateModel(_ArchitectureBuilder):
335
336
  if self.head.T0.shape == mean_target.shape:
336
337
  self.head.T0.data = mean_target
337
338
  _LOGGER.info(f"Initialized T0 to {mean_target.cpu().numpy()}")
338
- elif self.head.T0.numel() == 1 and mean_target.numel() == 1:
339
+ elif self.head.T0.numel() == 1 and mean_target.numel() == 1: # type: ignore
339
340
  # scalar case
340
- self.head.T0.data = mean_target.view(self.head.T0.shape)
341
+ self.head.T0.data = mean_target.view(self.head.T0.shape) # type: ignore
341
342
  _LOGGER.info("GATE Initialization Complete. Ready to train.")
342
343
  # _LOGGER.info(f"Initialized T0 to {mean_target.item()}")
343
344
  else:
344
345
  _LOGGER.debug(f"Target shape mismatch for T0 init. Model: {self.head.T0.shape}, Data: {mean_target.shape}")
345
346
  _LOGGER.warning(f"GATE initialization skipped due to shape mismatch:\n Model: {self.head.T0.shape}\n Data: {mean_target.shape}")
346
347
 
347
- def get_architecture_config(self) -> Dict[str, Any]:
348
+ def get_architecture_config(self) -> dict[str, Any]:
348
349
  """Returns the full configuration of the model."""
349
350
  schema_dict = {
350
351
  'feature_names': self.schema.feature_names,
@@ -355,7 +356,7 @@ class DragonGateModel(_ArchitectureBuilder):
355
356
  }
356
357
 
357
358
  config = {
358
- 'schema_dict': schema_dict,
359
+ SchemaKeys.SCHEMA_DICT: schema_dict,
359
360
  'out_targets': self.out_targets,
360
361
  **self.model_hparams
361
362
  }
@@ -574,7 +575,7 @@ class DragonNodeModel(_ArchitectureBuilder):
574
575
  else:
575
576
  _LOGGER.warning("NODE Backbone does not have an 'initialize' method. Skipping.")
576
577
 
577
- def get_architecture_config(self) -> Dict[str, Any]:
578
+ def get_architecture_config(self) -> dict[str, Any]:
578
579
  """Returns the full configuration of the model."""
579
580
  schema_dict = {
580
581
  'feature_names': self.schema.feature_names,
@@ -585,7 +586,7 @@ class DragonNodeModel(_ArchitectureBuilder):
585
586
  }
586
587
 
587
588
  config = {
588
- 'schema_dict': schema_dict,
589
+ SchemaKeys.SCHEMA_DICT: schema_dict,
589
590
  'out_targets': self.out_targets,
590
591
  **self.model_hparams
591
592
  }
@@ -842,7 +843,7 @@ class DragonAutoInt(_ArchitectureBuilder):
842
843
  else:
843
844
  _LOGGER.warning("AutoInt Head does not have a bias parameter. Skipping initialization.")
844
845
 
845
- def get_architecture_config(self) -> Dict[str, Any]:
846
+ def get_architecture_config(self) -> dict[str, Any]:
846
847
  """Returns the full configuration of the model."""
847
848
  schema_dict = {
848
849
  'feature_names': self.schema.feature_names,
@@ -853,7 +854,7 @@ class DragonAutoInt(_ArchitectureBuilder):
853
854
  }
854
855
 
855
856
  config = {
856
- 'schema_dict': schema_dict,
857
+ SchemaKeys.SCHEMA_DICT: schema_dict,
857
858
  'out_targets': self.out_targets,
858
859
  **self.model_hparams
859
860
  }
@@ -1059,8 +1060,14 @@ class DragonTabNet(_ArchitectureBuilder):
1059
1060
 
1060
1061
  self.regularization_loss /= self.n_steps
1061
1062
  return self.final_mapping(out_accumulated)
1063
+
1064
def data_aware_initialization(self, train_dataset, num_samples: int = 2000) -> None:
    """
    No-op: TabNet does not require data-aware initialization.

    Implemented only for interface compatibility — presumably so callers can
    invoke this hook uniformly across model classes (TODO confirm against the
    trainer code).

    Args:
        train_dataset: Ignored.
        num_samples (int): Ignored.
    """
    _LOGGER.info("TabNet does not require data-aware initialization. Skipping.")
1062
1069
 
1063
- def get_architecture_config(self) -> Dict[str, Any]:
1070
+ def get_architecture_config(self) -> dict[str, Any]:
1064
1071
  """Returns the full configuration of the model."""
1065
1072
  schema_dict = {
1066
1073
  'feature_names': self.schema.feature_names,
@@ -1071,12 +1078,9 @@ class DragonTabNet(_ArchitectureBuilder):
1071
1078
  }
1072
1079
 
1073
1080
  config = {
1074
- 'schema_dict': schema_dict,
1081
+ SchemaKeys.SCHEMA_DICT: schema_dict,
1075
1082
  'out_targets': self.out_targets,
1076
1083
  **self.model_hparams
1077
1084
  }
1078
1085
  return config
1079
-
1080
1086
 
1081
- def info():
1082
- _script_info(__all__)