dragon-ml-toolbox 19.13.0__py3-none-any.whl → 20.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (219) hide show
  1. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/METADATA +29 -46
  2. dragon_ml_toolbox-20.0.0.dist-info/RECORD +178 -0
  3. ml_tools/{ETL_cleaning.py → ETL_cleaning/__init__.py} +13 -5
  4. ml_tools/ETL_cleaning/_basic_clean.py +351 -0
  5. ml_tools/ETL_cleaning/_clean_tools.py +128 -0
  6. ml_tools/ETL_cleaning/_dragon_cleaner.py +245 -0
  7. ml_tools/ETL_cleaning/_imprimir.py +13 -0
  8. ml_tools/{ETL_engineering.py → ETL_engineering/__init__.py} +8 -4
  9. ml_tools/ETL_engineering/_dragon_engineering.py +261 -0
  10. ml_tools/ETL_engineering/_imprimir.py +24 -0
  11. ml_tools/{_core/_ETL_engineering.py → ETL_engineering/_transforms.py} +14 -267
  12. ml_tools/{_core → GUI_tools}/_GUI_tools.py +37 -40
  13. ml_tools/{GUI_tools.py → GUI_tools/__init__.py} +7 -5
  14. ml_tools/GUI_tools/_imprimir.py +12 -0
  15. ml_tools/IO_tools/_IO_loggers.py +235 -0
  16. ml_tools/IO_tools/_IO_save_load.py +151 -0
  17. ml_tools/IO_tools/_IO_utils.py +140 -0
  18. ml_tools/{IO_tools.py → IO_tools/__init__.py} +13 -5
  19. ml_tools/IO_tools/_imprimir.py +14 -0
  20. ml_tools/MICE/_MICE_imputation.py +132 -0
  21. ml_tools/{MICE_imputation.py → MICE/__init__.py} +6 -7
  22. ml_tools/{_core/_MICE_imputation.py → MICE/_dragon_mice.py} +243 -322
  23. ml_tools/MICE/_imprimir.py +11 -0
  24. ml_tools/{ML_callbacks.py → ML_callbacks/__init__.py} +12 -4
  25. ml_tools/ML_callbacks/_base.py +101 -0
  26. ml_tools/ML_callbacks/_checkpoint.py +232 -0
  27. ml_tools/ML_callbacks/_early_stop.py +208 -0
  28. ml_tools/ML_callbacks/_imprimir.py +12 -0
  29. ml_tools/ML_callbacks/_scheduler.py +197 -0
  30. ml_tools/{ML_chaining_utilities.py → ML_chain/__init__.py} +8 -3
  31. ml_tools/{_core/_ML_chaining_utilities.py → ML_chain/_chaining_tools.py} +5 -129
  32. ml_tools/ML_chain/_dragon_chain.py +140 -0
  33. ml_tools/ML_chain/_imprimir.py +11 -0
  34. ml_tools/ML_configuration/__init__.py +90 -0
  35. ml_tools/ML_configuration/_base_model_config.py +69 -0
  36. ml_tools/ML_configuration/_finalize.py +366 -0
  37. ml_tools/ML_configuration/_imprimir.py +47 -0
  38. ml_tools/ML_configuration/_metrics.py +593 -0
  39. ml_tools/ML_configuration/_models.py +206 -0
  40. ml_tools/ML_configuration/_training.py +124 -0
  41. ml_tools/ML_datasetmaster/__init__.py +28 -0
  42. ml_tools/ML_datasetmaster/_base_datasetmaster.py +337 -0
  43. ml_tools/{_core/_ML_datasetmaster.py → ML_datasetmaster/_datasetmaster.py} +9 -329
  44. ml_tools/ML_datasetmaster/_imprimir.py +15 -0
  45. ml_tools/{_core/_ML_sequence_datasetmaster.py → ML_datasetmaster/_sequence_datasetmaster.py} +13 -15
  46. ml_tools/{_core/_ML_vision_datasetmaster.py → ML_datasetmaster/_vision_datasetmaster.py} +63 -65
  47. ml_tools/ML_evaluation/__init__.py +53 -0
  48. ml_tools/ML_evaluation/_classification.py +629 -0
  49. ml_tools/ML_evaluation/_feature_importance.py +409 -0
  50. ml_tools/ML_evaluation/_imprimir.py +25 -0
  51. ml_tools/ML_evaluation/_loss.py +92 -0
  52. ml_tools/ML_evaluation/_regression.py +273 -0
  53. ml_tools/{_core/_ML_sequence_evaluation.py → ML_evaluation/_sequence.py} +8 -11
  54. ml_tools/{_core/_ML_vision_evaluation.py → ML_evaluation/_vision.py} +12 -17
  55. ml_tools/{_core → ML_evaluation_captum}/_ML_evaluation_captum.py +11 -38
  56. ml_tools/{ML_evaluation_captum.py → ML_evaluation_captum/__init__.py} +6 -4
  57. ml_tools/ML_evaluation_captum/_imprimir.py +10 -0
  58. ml_tools/{_core → ML_finalize_handler}/_ML_finalize_handler.py +3 -7
  59. ml_tools/ML_finalize_handler/__init__.py +10 -0
  60. ml_tools/ML_finalize_handler/_imprimir.py +8 -0
  61. ml_tools/ML_inference/__init__.py +22 -0
  62. ml_tools/ML_inference/_base_inference.py +166 -0
  63. ml_tools/{_core/_ML_chaining_inference.py → ML_inference/_chain_inference.py} +14 -17
  64. ml_tools/ML_inference/_dragon_inference.py +332 -0
  65. ml_tools/ML_inference/_imprimir.py +11 -0
  66. ml_tools/ML_inference/_multi_inference.py +180 -0
  67. ml_tools/ML_inference_sequence/__init__.py +10 -0
  68. ml_tools/ML_inference_sequence/_imprimir.py +8 -0
  69. ml_tools/{_core/_ML_sequence_inference.py → ML_inference_sequence/_sequence_inference.py} +11 -15
  70. ml_tools/ML_inference_vision/__init__.py +10 -0
  71. ml_tools/ML_inference_vision/_imprimir.py +8 -0
  72. ml_tools/{_core/_ML_vision_inference.py → ML_inference_vision/_vision_inference.py} +15 -19
  73. ml_tools/ML_models/__init__.py +32 -0
  74. ml_tools/{_core/_ML_models_advanced.py → ML_models/_advanced_models.py} +22 -18
  75. ml_tools/ML_models/_base_mlp_attention.py +198 -0
  76. ml_tools/{_core/_models_advanced_base.py → ML_models/_base_save_load.py} +73 -49
  77. ml_tools/ML_models/_dragon_tabular.py +248 -0
  78. ml_tools/ML_models/_imprimir.py +18 -0
  79. ml_tools/ML_models/_mlp_attention.py +134 -0
  80. ml_tools/{_core → ML_models}/_models_advanced_helpers.py +13 -13
  81. ml_tools/ML_models_sequence/__init__.py +10 -0
  82. ml_tools/ML_models_sequence/_imprimir.py +8 -0
  83. ml_tools/{_core/_ML_sequence_models.py → ML_models_sequence/_sequence_models.py} +5 -8
  84. ml_tools/ML_models_vision/__init__.py +29 -0
  85. ml_tools/ML_models_vision/_base_wrapper.py +254 -0
  86. ml_tools/ML_models_vision/_image_classification.py +182 -0
  87. ml_tools/ML_models_vision/_image_segmentation.py +108 -0
  88. ml_tools/ML_models_vision/_imprimir.py +16 -0
  89. ml_tools/ML_models_vision/_object_detection.py +135 -0
  90. ml_tools/ML_optimization/__init__.py +21 -0
  91. ml_tools/ML_optimization/_imprimir.py +13 -0
  92. ml_tools/{_core/_ML_optimization_pareto.py → ML_optimization/_multi_dragon.py} +18 -24
  93. ml_tools/ML_optimization/_single_dragon.py +203 -0
  94. ml_tools/{_core/_ML_optimization.py → ML_optimization/_single_manual.py} +75 -213
  95. ml_tools/{_core → ML_scaler}/_ML_scaler.py +8 -11
  96. ml_tools/ML_scaler/__init__.py +10 -0
  97. ml_tools/ML_scaler/_imprimir.py +8 -0
  98. ml_tools/ML_trainer/__init__.py +20 -0
  99. ml_tools/ML_trainer/_base_trainer.py +297 -0
  100. ml_tools/ML_trainer/_dragon_detection_trainer.py +402 -0
  101. ml_tools/ML_trainer/_dragon_sequence_trainer.py +540 -0
  102. ml_tools/ML_trainer/_dragon_trainer.py +1160 -0
  103. ml_tools/ML_trainer/_imprimir.py +10 -0
  104. ml_tools/{ML_utilities.py → ML_utilities/__init__.py} +14 -6
  105. ml_tools/ML_utilities/_artifact_finder.py +382 -0
  106. ml_tools/ML_utilities/_imprimir.py +16 -0
  107. ml_tools/ML_utilities/_inspection.py +325 -0
  108. ml_tools/ML_utilities/_train_tools.py +205 -0
  109. ml_tools/{ML_vision_transformers.py → ML_vision_transformers/__init__.py} +9 -6
  110. ml_tools/{_core/_ML_vision_transformers.py → ML_vision_transformers/_core_transforms.py} +11 -155
  111. ml_tools/ML_vision_transformers/_imprimir.py +14 -0
  112. ml_tools/ML_vision_transformers/_offline_augmentation.py +159 -0
  113. ml_tools/{_core/_PSO_optimization.py → PSO_optimization/_PSO.py} +58 -15
  114. ml_tools/{PSO_optimization.py → PSO_optimization/__init__.py} +5 -3
  115. ml_tools/PSO_optimization/_imprimir.py +10 -0
  116. ml_tools/SQL/__init__.py +7 -0
  117. ml_tools/{_core/_SQL.py → SQL/_dragon_SQL.py} +7 -11
  118. ml_tools/SQL/_imprimir.py +8 -0
  119. ml_tools/{_core → VIF}/_VIF_factor.py +5 -8
  120. ml_tools/{VIF_factor.py → VIF/__init__.py} +4 -2
  121. ml_tools/VIF/_imprimir.py +10 -0
  122. ml_tools/_core/__init__.py +7 -1
  123. ml_tools/_core/_logger.py +8 -18
  124. ml_tools/_core/_schema_load_ops.py +43 -0
  125. ml_tools/_core/_script_info.py +2 -2
  126. ml_tools/{data_exploration.py → data_exploration/__init__.py} +32 -16
  127. ml_tools/data_exploration/_analysis.py +214 -0
  128. ml_tools/data_exploration/_cleaning.py +566 -0
  129. ml_tools/data_exploration/_features.py +583 -0
  130. ml_tools/data_exploration/_imprimir.py +32 -0
  131. ml_tools/data_exploration/_plotting.py +487 -0
  132. ml_tools/data_exploration/_schema_ops.py +176 -0
  133. ml_tools/{ensemble_evaluation.py → ensemble_evaluation/__init__.py} +6 -4
  134. ml_tools/{_core → ensemble_evaluation}/_ensemble_evaluation.py +3 -7
  135. ml_tools/ensemble_evaluation/_imprimir.py +14 -0
  136. ml_tools/{ensemble_inference.py → ensemble_inference/__init__.py} +5 -3
  137. ml_tools/{_core → ensemble_inference}/_ensemble_inference.py +15 -18
  138. ml_tools/ensemble_inference/_imprimir.py +9 -0
  139. ml_tools/{ensemble_learning.py → ensemble_learning/__init__.py} +4 -6
  140. ml_tools/{_core → ensemble_learning}/_ensemble_learning.py +7 -10
  141. ml_tools/ensemble_learning/_imprimir.py +10 -0
  142. ml_tools/{excel_handler.py → excel_handler/__init__.py} +5 -3
  143. ml_tools/{_core → excel_handler}/_excel_handler.py +6 -10
  144. ml_tools/excel_handler/_imprimir.py +13 -0
  145. ml_tools/{keys.py → keys/__init__.py} +4 -1
  146. ml_tools/keys/_imprimir.py +11 -0
  147. ml_tools/{_core → keys}/_keys.py +2 -0
  148. ml_tools/{math_utilities.py → math_utilities/__init__.py} +5 -2
  149. ml_tools/math_utilities/_imprimir.py +11 -0
  150. ml_tools/{_core → math_utilities}/_math_utilities.py +1 -5
  151. ml_tools/{optimization_tools.py → optimization_tools/__init__.py} +9 -4
  152. ml_tools/optimization_tools/_imprimir.py +13 -0
  153. ml_tools/optimization_tools/_optimization_bounds.py +236 -0
  154. ml_tools/optimization_tools/_optimization_plots.py +218 -0
  155. ml_tools/{path_manager.py → path_manager/__init__.py} +6 -3
  156. ml_tools/{_core/_path_manager.py → path_manager/_dragonmanager.py} +11 -347
  157. ml_tools/path_manager/_imprimir.py +15 -0
  158. ml_tools/path_manager/_path_tools.py +346 -0
  159. ml_tools/plot_fonts/__init__.py +8 -0
  160. ml_tools/plot_fonts/_imprimir.py +8 -0
  161. ml_tools/{_core → plot_fonts}/_plot_fonts.py +2 -5
  162. ml_tools/schema/__init__.py +15 -0
  163. ml_tools/schema/_feature_schema.py +223 -0
  164. ml_tools/schema/_gui_schema.py +191 -0
  165. ml_tools/schema/_imprimir.py +10 -0
  166. ml_tools/{serde.py → serde/__init__.py} +4 -2
  167. ml_tools/serde/_imprimir.py +10 -0
  168. ml_tools/{_core → serde}/_serde.py +3 -8
  169. ml_tools/{utilities.py → utilities/__init__.py} +11 -6
  170. ml_tools/utilities/_imprimir.py +18 -0
  171. ml_tools/{_core/_utilities.py → utilities/_utility_save_load.py} +13 -190
  172. ml_tools/utilities/_utility_tools.py +192 -0
  173. dragon_ml_toolbox-19.13.0.dist-info/RECORD +0 -111
  174. ml_tools/ML_chaining_inference.py +0 -8
  175. ml_tools/ML_configuration.py +0 -86
  176. ml_tools/ML_configuration_pytab.py +0 -14
  177. ml_tools/ML_datasetmaster.py +0 -10
  178. ml_tools/ML_evaluation.py +0 -16
  179. ml_tools/ML_evaluation_multi.py +0 -12
  180. ml_tools/ML_finalize_handler.py +0 -8
  181. ml_tools/ML_inference.py +0 -12
  182. ml_tools/ML_models.py +0 -14
  183. ml_tools/ML_models_advanced.py +0 -14
  184. ml_tools/ML_models_pytab.py +0 -14
  185. ml_tools/ML_optimization.py +0 -14
  186. ml_tools/ML_optimization_pareto.py +0 -8
  187. ml_tools/ML_scaler.py +0 -8
  188. ml_tools/ML_sequence_datasetmaster.py +0 -8
  189. ml_tools/ML_sequence_evaluation.py +0 -10
  190. ml_tools/ML_sequence_inference.py +0 -8
  191. ml_tools/ML_sequence_models.py +0 -8
  192. ml_tools/ML_trainer.py +0 -12
  193. ml_tools/ML_vision_datasetmaster.py +0 -12
  194. ml_tools/ML_vision_evaluation.py +0 -10
  195. ml_tools/ML_vision_inference.py +0 -8
  196. ml_tools/ML_vision_models.py +0 -18
  197. ml_tools/SQL.py +0 -8
  198. ml_tools/_core/_ETL_cleaning.py +0 -694
  199. ml_tools/_core/_IO_tools.py +0 -498
  200. ml_tools/_core/_ML_callbacks.py +0 -702
  201. ml_tools/_core/_ML_configuration.py +0 -1332
  202. ml_tools/_core/_ML_configuration_pytab.py +0 -102
  203. ml_tools/_core/_ML_evaluation.py +0 -867
  204. ml_tools/_core/_ML_evaluation_multi.py +0 -544
  205. ml_tools/_core/_ML_inference.py +0 -646
  206. ml_tools/_core/_ML_models.py +0 -668
  207. ml_tools/_core/_ML_models_pytab.py +0 -693
  208. ml_tools/_core/_ML_trainer.py +0 -2323
  209. ml_tools/_core/_ML_utilities.py +0 -886
  210. ml_tools/_core/_ML_vision_models.py +0 -644
  211. ml_tools/_core/_data_exploration.py +0 -1901
  212. ml_tools/_core/_optimization_tools.py +0 -493
  213. ml_tools/_core/_schema.py +0 -359
  214. ml_tools/plot_fonts.py +0 -8
  215. ml_tools/schema.py +0 -12
  216. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/WHEEL +0 -0
  217. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/licenses/LICENSE +0 -0
  218. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
  219. {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,182 @@
1
+ from torch import nn
2
+ import torchvision.models as vision_models
3
+ from typing import Literal, Optional
4
+
5
+ from ._base_wrapper import _BaseVisionWrapper
6
+
7
+
8
# Public API of this module (image-classification wrappers).
__all__ = [
    "DragonResNet",
    "DragonEfficientNet",
    "DragonVGG",
]
13
+
14
+
15
# Image classification
class DragonResNet(_BaseVisionWrapper):
    """
    Image Classification

    A customizable wrapper for the torchvision ResNet family, compatible
    with saving/loading architecture.

    This wrapper allows for customizing the model backbone, input channels,
    and the number of output classes for transfer learning.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: int = 3,
                 model_name: Literal["resnet18", "resnet34", "resnet50", "resnet101", "resnet152"] = 'resnet50',
                 init_with_pretrained: bool = False):
        """
        Args:
            num_classes (int):
                Number of output classes for the final layer.
            in_channels (int):
                Number of input channels (e.g., 1 for grayscale, 3 for RGB).
            model_name (str):
                The name of the ResNet model to use (e.g., 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'). Number is the layer count.
            init_with_pretrained (bool):
                If True, initializes the model with weights pretrained on ImageNet. This flag is for initialization only and is NOT saved in the architecture config.
        """
        # BUG FIX: previously this did getattr(vision_models, f"{model_name.upper()}_Weights", None),
        # i.e. looked up "RESNET50_Weights" — but torchvision names these enums with mixed
        # case ("ResNet50_Weights"), so the lookup always returned None and
        # `init_with_pretrained=True` silently loaded no weights. Build the correctly
        # cased enum *name* string instead, matching the sibling wrappers
        # (DragonVGG, DragonFCN) which also pass a string to the base class.
        weights_enum_name = f"{model_name.replace('resnet', 'ResNet')}_Weights"

        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            model_name=model_name,
            init_with_pretrained=init_with_pretrained,
            weights_enum_name=weights_enum_name
        )

    def _get_input_layer(self) -> nn.Conv2d:
        # ResNet's stem convolution is the input layer.
        return self.model.conv1

    def _set_input_layer(self, layer: nn.Conv2d):
        self.model.conv1 = layer

    def _get_output_layer(self) -> Optional[nn.Linear]:
        # ResNet's final fully-connected head.
        return self.model.fc

    def _set_output_layer(self, layer: nn.Linear):
        self.model.fc = layer
64
+
65
+
66
class DragonEfficientNet(_BaseVisionWrapper):
    """
    Image Classification

    A customizable wrapper for the torchvision EfficientNet family, compatible
    with saving/loading architecture.

    This wrapper allows for customizing the model backbone, input channels,
    and the number of output classes for transfer learning.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: int = 3,
                 model_name: str = 'efficientnet_b0',
                 init_with_pretrained: bool = False):
        """
        Args:
            num_classes (int):
                Number of output classes for the final layer.
            in_channels (int):
                Number of input channels (e.g., 1 for grayscale, 3 for RGB).
            model_name (str):
                The name of the EfficientNet model to use (e.g., 'efficientnet_b0'
                through 'efficientnet_b7', or 'efficientnet_v2_s', 'efficientnet_v2_m', 'efficientnet_v2_l').
            init_with_pretrained (bool):
                If True, initializes the model with weights pretrained on
                ImageNet. This flag is for initialization only and is
                NOT saved in the architecture config. Defaults to False.
        """
        # BUG FIX: previously this did getattr(vision_models, f"{model_name.upper()}_Weights", None),
        # i.e. looked up "EFFICIENTNET_B0_Weights" — but torchvision names these enums
        # "EfficientNet_B0_Weights" / "EfficientNet_V2_S_Weights", so the lookup always
        # returned None and pretrained initialization silently did nothing. Build the
        # correctly cased enum *name* string (first token -> "EfficientNet", remaining
        # tokens upper-cased), consistent with the other string-passing wrappers.
        suffix = "_".join(part.upper() for part in model_name.split('_')[1:])
        weights_enum_name = f"EfficientNet_{suffix}_Weights"

        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            model_name=model_name,
            init_with_pretrained=init_with_pretrained,
            weights_enum_name=weights_enum_name
        )

    def _get_input_layer(self) -> nn.Conv2d:
        # The first conv layer in EfficientNet is model.features[0][0]
        return self.model.features[0][0]

    def _set_input_layer(self, layer: nn.Conv2d):
        self.model.features[0][0] = layer

    def _get_output_layer(self) -> Optional[nn.Linear]:
        # The classifier in EfficientNet is model.classifier[1]
        if hasattr(self.model, 'classifier') and isinstance(self.model.classifier, nn.Sequential):
            output_layer = self.model.classifier[1]
            if isinstance(output_layer, nn.Linear):
                return output_layer
        return None

    def _set_output_layer(self, layer: nn.Linear):
        self.model.classifier[1] = layer
123
+
124
+
125
class DragonVGG(_BaseVisionWrapper):
    """
    Image Classification

    Wraps the torchvision VGG family so the architecture can be saved and
    reloaded through the shared base-wrapper machinery.

    The backbone variant, number of input channels, and number of output
    classes are all configurable, making the wrapper suitable for
    transfer learning.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: int = 3,
                 model_name: Literal["vgg11", "vgg13", "vgg16", "vgg19", "vgg11_bn", "vgg13_bn", "vgg16_bn", "vgg19_bn"] = 'vgg16',
                 init_with_pretrained: bool = False):
        """
        Args:
            num_classes (int):
                Number of output classes for the final layer.
            in_channels (int):
                Number of input channels (e.g., 1 for grayscale, 3 for RGB).
            model_name (str):
                Which VGG variant to build (e.g., 'vgg16', 'vgg16_bn').
            init_with_pretrained (bool):
                When True, start from ImageNet-pretrained weights. Used only
                at construction time; it is NOT stored in the architecture
                config. Defaults to False.
        """
        # Derive the torchvision weights-enum name, e.g. vgg16_bn -> VGG16_BN_Weights.
        enum_base = model_name.replace('_bn', '_BN').upper()
        weights_enum_name = enum_base + "_Weights"

        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            model_name=model_name,
            init_with_pretrained=init_with_pretrained,
            weights_enum_name=weights_enum_name,
        )

    def _get_input_layer(self) -> nn.Conv2d:
        # VGG keeps its first convolution at features[0].
        return self.model.features[0]

    def _set_input_layer(self, layer: nn.Conv2d):
        self.model.features[0] = layer

    def _get_output_layer(self) -> Optional[nn.Linear]:
        # The classification head of VGG sits at classifier[6] inside a
        # 7-module Sequential; anything else means the head was replaced.
        classifier = getattr(self.model, 'classifier', None)
        if isinstance(classifier, nn.Sequential) and len(classifier) == 7:
            candidate = classifier[6]
            if isinstance(candidate, nn.Linear):
                return candidate
        return None

    def _set_output_layer(self, layer: nn.Linear):
        self.model.classifier[6] = layer
182
+
@@ -0,0 +1,108 @@
1
+ from torch import nn
2
+ from typing import Literal
3
+
4
+ from ._base_wrapper import _BaseSegmentationWrapper
5
+
6
+
7
# Public API of this module (image-segmentation wrappers).
__all__ = [
    "DragonFCN",
    "DragonDeepLabv3",
]
11
+
12
+
13
class DragonFCN(_BaseSegmentationWrapper):
    """
    Image Segmentation

    Wraps the torchvision FCN (Fully Convolutional Network) family so the
    architecture can be saved and reloaded.

    The backbone variant, number of input channels, and number of output
    classes are all configurable for transfer learning.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: int = 3,
                 model_name: Literal["fcn_resnet50", "fcn_resnet101"] = 'fcn_resnet50',
                 init_with_pretrained: bool = False):
        """
        Args:
            num_classes (int):
                Number of output classes (including background).
            in_channels (int):
                Number of input channels (e.g., 1 for grayscale, 3 for RGB).
            model_name (str):
                FCN variant to build ('fcn_resnet50' or 'fcn_resnet101').
            init_with_pretrained (bool):
                When True, start from COCO-pretrained weights. Used only at
                construction time; NOT stored in the architecture config.
                Defaults to False.
        """
        # Derive the torchvision weights-enum name,
        # e.g. fcn_resnet50 -> FCN_ResNet50_Weights.
        cased = model_name.replace('fcn_', 'FCN_').replace('resnet', 'ResNet')
        weights_enum_name = cased + "_Weights"

        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            model_name=model_name,
            init_with_pretrained=init_with_pretrained,
            weights_enum_name=weights_enum_name,
        )

    def _get_input_layer(self) -> nn.Conv2d:
        # FCN uses a ResNet backbone; its stem conv is the input layer.
        return self.model.backbone.conv1

    def _set_input_layer(self, layer: nn.Conv2d):
        self.model.backbone.conv1 = layer
59
+
60
+
61
class DragonDeepLabv3(_BaseSegmentationWrapper):
    """
    Image Segmentation

    Wraps the torchvision DeepLabv3 family so the architecture can be
    saved and reloaded.

    The backbone variant, number of input channels, and number of output
    classes are all configurable for transfer learning.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: int = 3,
                 model_name: Literal["deeplabv3_resnet50", "deeplabv3_resnet101"] = 'deeplabv3_resnet50',
                 init_with_pretrained: bool = False):
        """
        Args:
            num_classes (int):
                Number of output classes (including background).
            in_channels (int):
                Number of input channels (e.g., 1 for grayscale, 3 for RGB).
            model_name (str):
                DeepLabv3 variant to build ('deeplabv3_resnet50' or 'deeplabv3_resnet101').
            init_with_pretrained (bool):
                When True, start from COCO-pretrained weights. Used only at
                construction time; NOT stored in the architecture config.
                Defaults to False.
        """
        # Derive the torchvision weights-enum name,
        # e.g. deeplabv3_resnet50 -> DeepLabV3_ResNet50_Weights.
        cased = model_name.replace('deeplabv3_', 'DeepLabV3_').replace('resnet', 'ResNet')
        weights_enum_name = cased + "_Weights"

        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            model_name=model_name,
            init_with_pretrained=init_with_pretrained,
            weights_enum_name=weights_enum_name,
        )

    def _get_input_layer(self) -> nn.Conv2d:
        # DeepLabv3 uses a ResNet backbone; its stem conv is the input layer.
        return self.model.backbone.conv1

    def _set_input_layer(self, layer: nn.Conv2d):
        self.model.backbone.conv1 = layer
108
+
@@ -0,0 +1,16 @@
1
+ from .._core import _imprimir_disponibles
2
+
3
+ _GRUPOS = [
4
+ # Image Classification
5
+ "DragonResNet",
6
+ "DragonEfficientNet",
7
+ "DragonVGG",
8
+ # Image Segmentation
9
+ "DragonFCN",
10
+ "DragonDeepLabv3",
11
+ # Object Detection
12
+ "DragonFastRCNN",
13
+ ]
14
+
15
+ def info():
16
+ _imprimir_disponibles(_GRUPOS)
@@ -0,0 +1,135 @@
1
+ import torch
2
+ from torch import nn
3
+ from torchvision.models import detection as detection_models
4
+ from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
5
+ from typing import Any, Literal, Optional
6
+
7
+ from ..ML_models._base_save_load import _ArchitectureHandlerMixin
8
+
9
+ from .._core import get_logger
10
+
11
+
12
# Module-scoped logger; the given name prefixes emitted log records.
_LOGGER = get_logger("DragonFastRCNN")


# Public API of this module.
__all__ = [
    "DragonFastRCNN",
]
18
+
19
+
20
# Object Detection
class DragonFastRCNN(nn.Module, _ArchitectureHandlerMixin):
    """
    Object Detection

    A customizable wrapper for the torchvision Faster R-CNN family.

    This wrapper allows for customizing the model backbone, input channels,
    and the number of output classes for transfer learning.

    NOTE: Use an Object Detection compatible trainer.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: int = 3,
                 model_name: Literal["fasterrcnn_resnet50_fpn", "fasterrcnn_resnet50_fpn_v2"] = 'fasterrcnn_resnet50_fpn_v2',
                 init_with_pretrained: bool = False):
        """
        Args:
            num_classes (int):
                Number of output classes (including background).
            in_channels (int):
                Number of input channels (e.g., 1 for grayscale, 3 for RGB).
            model_name (str):
                The name of the Faster R-CNN model to use.
            init_with_pretrained (bool):
                If True, initializes the model with weights pretrained on COCO.
                This flag is for initialization only and is NOT saved in the
                architecture config. Defaults to False.
        """
        super().__init__()

        # --- 1. Validation and Configuration ---
        if not hasattr(detection_models, model_name):
            _LOGGER.error(f"'{model_name}' is not a valid model name in torchvision.models.detection.")
            raise ValueError()

        self.num_classes = num_classes
        self.in_channels = in_channels
        self.model_name = model_name
        self._pretrained_default_transforms = None

        # --- 2. Instantiate the base model ---
        model_constructor = getattr(detection_models, model_name)

        # Derive the weights-enum name, e.g.
        #   fasterrcnn_resnet50_fpn_v2 -> FasterRCNN_ResNet50_FPN_V2_Weights
        # BUG FIX: the name was previously upper-cased wholesale
        # ("FASTERRCNN_RESNET50_FPN_V2_Weights"), which never matches
        # torchvision's mixed-case enum names, so getattr always returned None
        # and `init_with_pretrained=True` silently loaded no weights.
        weights_enum_name = (
            model_name
            .replace('fasterrcnn_', 'FasterRCNN_')
            .replace('resnet', 'ResNet')
            .replace('_fpn', '_FPN')
            .replace('_v2', '_V2')
        ) + "_Weights"

        weights_enum = getattr(detection_models, weights_enum_name, None)
        weights = weights_enum.DEFAULT if weights_enum and init_with_pretrained else None

        if weights:
            self._pretrained_default_transforms = weights.transforms()

        # BUG FIX: `weights_backbone` expects a *backbone* (ResNet) weights enum,
        # and torchvision's builder verifies its type before discarding it when
        # full `weights` are supplied — so passing the detection weights enum
        # here raised TypeError. Pass None instead: the full-model weights
        # already include the backbone, and when not pretrained no backbone
        # weights should be loaded either.
        self.model = model_constructor(weights=weights, weights_backbone=None)

        # --- 3. Modify the output layer (Box Predictor) ---
        # Get the number of input features for the classifier
        in_features = self.model.roi_heads.box_predictor.cls_score.in_features
        # Replace the pre-trained head with a new one sized for `num_classes`
        self.model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

        # --- 4. Modify the input layer (Backbone conv1) ---
        if in_channels != 3:
            original_conv1 = self.model.backbone.body.conv1

            new_conv1 = nn.Conv2d(
                in_channels,
                original_conv1.out_channels,
                kernel_size=original_conv1.kernel_size,  # type: ignore
                stride=original_conv1.stride,  # type: ignore
                padding=original_conv1.padding,  # type: ignore
                bias=(original_conv1.bias is not None)
            )

            # (Optional) Average original weights if starting from pretrained
            if init_with_pretrained and original_conv1.in_channels == 3 and weights is not None:
                with torch.no_grad():
                    # Average the weights across the input channel dimension
                    avg_weights = torch.mean(original_conv1.weight, dim=1, keepdim=True)
                    # Repeat the averaged weights for the new number of input channels
                    new_conv1.weight[:] = avg_weights.repeat(1, in_channels, 1, 1)

            self.model.backbone.body.conv1 = new_conv1

    def forward(self, images: list[torch.Tensor], targets: Optional[list[dict[str, torch.Tensor]]] = None):
        """
        Defines the forward pass.

        - In train mode, expects (images, targets) and returns a dict of losses.
        - In eval mode, expects (images) and returns a list of prediction dicts.
        """
        # The model's forward pass handles train/eval mode internally.
        return self.model(images, targets)

    def get_architecture_config(self) -> dict[str, Any]:
        """
        Returns the structural configuration of the model.
        The 'init_with_pretrained' flag is intentionally omitted,
        as .load() should restore the architecture, not the weights.
        """
        return {
            'num_classes': self.num_classes,
            'in_channels': self.in_channels,
            'model_name': self.model_name
        }

    def __repr__(self) -> str:
        """Returns the developer-friendly string representation of the model."""
        return (
            f"{self.__class__.__name__}(model='{self.model_name}', "
            f"in_channels={self.in_channels}, "
            f"num_classes={self.num_classes})"
        )
135
+
@@ -0,0 +1,21 @@
1
+ from ._multi_dragon import DragonParetoOptimizer
2
+
3
+ from ._single_dragon import DragonOptimizer
4
+
5
+ from ._single_manual import (
6
+ FitnessEvaluator,
7
+ create_pytorch_problem,
8
+ run_optimization,
9
+ )
10
+
11
+ from ._imprimir import info
12
+
13
+
14
# Public API re-exported by `ml_tools.ML_optimization`.
__all__ = [
    "DragonParetoOptimizer",
    "DragonOptimizer",
    # manual optimization tools
    "FitnessEvaluator",
    "create_pytorch_problem",
    "run_optimization",
]
@@ -0,0 +1,13 @@
1
+ from .._core import _imprimir_disponibles
2
+
3
+ _GRUPOS = [
4
+ "DragonParetoOptimizer",
5
+ "DragonOptimizer",
6
+ # manual optimization tools
7
+ "FitnessEvaluator",
8
+ "create_pytorch_problem",
9
+ "run_optimization",
10
+ ]
11
+
12
+ def info():
13
+ _imprimir_disponibles(_GRUPOS)
@@ -1,13 +1,10 @@
1
1
  import torch
2
- import numpy as np
3
2
  import pandas as pd
4
3
  import matplotlib.pyplot as plt
5
- import matplotlib.colors as mcolors
6
- import matplotlib.cm as cm
7
4
  from matplotlib.collections import LineCollection
8
5
  import seaborn as sns
9
6
  from pathlib import Path
10
- from typing import Literal, Union, List, Optional, Dict
7
+ from typing import Literal, Union, Optional
11
8
  from tqdm import tqdm
12
9
  import plotly.express as px
13
10
  import plotly.graph_objects as go
@@ -17,22 +14,22 @@ from evotorch import Problem
17
14
  from evotorch.operators import SimulatedBinaryCrossOver, GaussianMutation
18
15
  from evotorch.operators import functional as func_ops
19
16
 
20
- from ._SQL import DragonSQL
21
- from ._ML_inference import DragonInferenceHandler
22
- from ._ML_chaining_inference import DragonChainInference
23
- from ._ML_configuration import DragonParetoConfig
24
- from ._optimization_tools import create_optimization_bounds, plot_optimal_feature_distributions_from_dataframe, load_continuous_bounds_template
25
- from ._math_utilities import discretize_categorical_values
26
- from ._utilities import save_dataframe_filename
27
- from ._IO_tools import save_json
28
- from ._path_manager import make_fullpath, sanitize_filename
29
- from ._logger import get_logger
30
- from ._script_info import _script_info
31
- from ._keys import PyTorchInferenceKeys, MLTaskKeys, ParetoOptimizationKeys
32
- from ._schema import FeatureSchema
17
+ from ..SQL import DragonSQL
18
+ from ..ML_inference import DragonInferenceHandler
19
+ from ..ML_inference._chain_inference import DragonChainInference
20
+ from ..ML_configuration import DragonParetoConfig
21
+ from ..optimization_tools import create_optimization_bounds, plot_optimal_feature_distributions_from_dataframe, load_continuous_bounds_template
22
+ from ..utilities import save_dataframe_filename
23
+ from ..IO_tools import save_json
24
+ from ..schema import FeatureSchema
33
25
 
26
+ from ..math_utilities import discretize_categorical_values
27
+ from ..path_manager import make_fullpath, sanitize_filename
28
+ from .._core import get_logger
29
+ from ..keys._keys import PyTorchInferenceKeys, MLTaskKeys, ParetoOptimizationKeys
34
30
 
35
- _LOGGER = get_logger("Pareto Optimizer")
31
+
32
+ _LOGGER = get_logger("DragonParetoOptimizer")
36
33
 
37
34
 
38
35
  __all__ = [
@@ -793,9 +790,9 @@ class _ParetoFitnessEvaluator:
793
790
  """
794
791
  def __init__(self,
795
792
  inference_handler: Union[DragonInferenceHandler, DragonChainInference],
796
- target_indices: List[int],
797
- target_names: List[str],
798
- categorical_index_map: Optional[Dict[int, int]] = None,
793
+ target_indices: list[int],
794
+ target_names: list[str],
795
+ categorical_index_map: Optional[dict[int, int]] = None,
799
796
  discretize_start_at_zero: bool = True,
800
797
  is_chain: bool = False):
801
798
 
@@ -856,6 +853,3 @@ class _ParetoFitnessEvaluator:
856
853
  # Slice specific indices -> (Batch, N_Selected_Targets)
857
854
  return preds[:, self.target_indices]
858
855
 
859
-
860
- def info():
861
- _script_info(__all__)