dragon-ml-toolbox 13.1.0__py3-none-any.whl → 14.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dragon-ml-toolbox might be problematic.

@@ -0,0 +1,323 @@
+ import torch
+ from torch import nn
+ from typing import Union, Dict, Any
+ from pathlib import Path
+ import json
+
+ from ._logger import _LOGGER
+ from .path_manager import make_fullpath
+ from .keys import PytorchModelArchitectureKeys
+ from ._schema import FeatureSchema
+ from ._script_info import _script_info
+ from .ML_models import _ArchitectureHandlerMixin
+
+ # Imports from pytorch_tabular
+ try:
+     from omegaconf import DictConfig
+     from pytorch_tabular.models import GatedAdditiveTreeEnsembleModel, NODEModel
+ except ImportError:
+     _LOGGER.error(f"GATE and NODE require 'pip install pytorch_tabular omegaconf' dependencies.")
+     raise ImportError()
+
+
+ __all__ = [
+     "DragonGateModel",
+     "DragonNodeModel",
+ ]
+
+
+ class _BasePytabWrapper(nn.Module, _ArchitectureHandlerMixin):
+     """
+     Internal Base Class: Do not use directly.
+
+     This is an adapter to make pytorch_tabular models compatible with the
+     dragon-ml-toolbox pipeline.
+
+     It handles:
+     1. Schema-based initialization.
+     2. Single-tensor forward pass, which is then split into the
+        dict {'continuous': ..., 'categorical': ...} that pytorch_tabular expects.
+     3. Saving/Loading architecture using the pipeline's _ArchitectureHandlerMixin.
+     """
+     def __init__(self, schema: FeatureSchema):
+         super().__init__()
+
+         self.schema = schema
+         self.model_name = "Base"  # To be overridden by child
+         self.internal_model: nn.Module = None  # type: ignore # To be set by child
+         self.model_hparams: Dict = dict()  # To be set by child
+
+         # --- Derive indices from schema ---
+         categorical_map = schema.categorical_index_map
+
+         if categorical_map:
+             # The order of keys/values is implicitly linked and must be preserved
+             self.categorical_indices = list(categorical_map.keys())
+             self.cardinalities = list(categorical_map.values())
+         else:
+             self.categorical_indices = []
+             self.cardinalities = []
+
+         # Derive numerical indices by finding what's not categorical
+         all_indices = set(range(len(schema.feature_names)))
+         categorical_indices_set = set(self.categorical_indices)
+         self.numerical_indices = sorted(list(all_indices - categorical_indices_set))
+
+     def _build_pt_config(self, out_targets: int, **kwargs) -> DictConfig:
+         """Helper to create the minimal config dict for a pytorch_tabular model."""
+         # 'regression' is the most neutral for model architecture. The final output_dim is what truly matters.
+         task = "regression"
+
+         config_dict = {
+             # --- Data / Schema Params ---
+             'task': task,
+             'continuous_cols': list(self.schema.continuous_feature_names),
+             'categorical_cols': list(self.schema.categorical_feature_names),
+             'continuous_dim': len(self.numerical_indices),
+             'categorical_dim': len(self.categorical_indices),
+             'categorical_cardinality': self.cardinalities,
+             'target': ['dummy_target'],  # Required, but not used
+
+             # --- Model Params ---
+             'output_dim': out_targets,
+             **kwargs
+         }
+
+         # Add common params that most models need
+         if 'loss' not in config_dict:
+             config_dict['loss'] = 'NotUsed'
+         if 'metrics' not in config_dict:
+             config_dict['metrics'] = []
+
+         return DictConfig(config_dict)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         """
+         Accepts a single tensor and converts it to the dict
+         that pytorch_tabular models expect.
+         """
+         # 1. Split the single tensor input
+         x_cont = x[:, self.numerical_indices].float()
+         x_cat = x[:, self.categorical_indices].long()
+
+         # 2. Create the input dict
+         input_dict = {
+             'continuous': x_cont,
+             'categorical': x_cat
+         }
+
+         # 3. Pass to the internal pytorch_tabular model
+         # The model returns a dict, we extract the logits
+         model_output_dict = self.internal_model(input_dict)
+
+         # 4. Return the logits tensor
+         return model_output_dict['logits']
+
+     def get_architecture_config(self) -> Dict[str, Any]:
+         """Returns the full configuration of the model."""
+         # Deconstruct schema into a JSON-friendly dict
+         schema_dict = {
+             'feature_names': self.schema.feature_names,
+             'continuous_feature_names': self.schema.continuous_feature_names,
+             'categorical_feature_names': self.schema.categorical_feature_names,
+             'categorical_index_map': self.schema.categorical_index_map,
+             'categorical_mappings': self.schema.categorical_mappings
+         }
+
+         config = {
+             'schema_dict': schema_dict,
+             'out_targets': self.out_targets,
+             **self.model_hparams
+         }
+         return config
+
+     @classmethod
+     def load(cls: type, file_or_dir: Union[str, Path], verbose: bool = True) -> nn.Module:
+         """Loads a model architecture from a JSON file."""
+         user_path = make_fullpath(file_or_dir)
+
+         if user_path.is_dir():
+             json_filename = PytorchModelArchitectureKeys.SAVENAME + ".json"
+             target_path = make_fullpath(user_path / json_filename, enforce="file")
+         elif user_path.is_file():
+             target_path = user_path
+         else:
+             _LOGGER.error(f"Invalid path: '{file_or_dir}'")
+             raise IOError()
+
+         with open(target_path, 'r') as f:
+             saved_data = json.load(f)
+
+         saved_class_name = saved_data[PytorchModelArchitectureKeys.MODEL]
+         config = saved_data[PytorchModelArchitectureKeys.CONFIG]
+
+         if saved_class_name != cls.__name__:
+             _LOGGER.error(f"Model class mismatch. File specifies '{saved_class_name}', but '{cls.__name__}' was expected.")
+             raise ValueError()
+
+         # --- RECONSTRUCTION LOGIC ---
+         if 'schema_dict' not in config:
+             _LOGGER.error("Invalid architecture file: missing 'schema_dict'. This file may be from an older version.")
+             raise ValueError("Missing 'schema_dict' in config.")
+
+         schema_data = config.pop('schema_dict')
+
+         # JSON saves all dict keys as strings, convert them back to int.
+         raw_index_map = schema_data['categorical_index_map']
+         if raw_index_map is not None:
+             rehydrated_index_map = {int(k): v for k, v in raw_index_map.items()}
+         else:
+             rehydrated_index_map = None
+
+         # JSON deserializes tuples as lists, convert them back.
+         schema = FeatureSchema(
+             feature_names=tuple(schema_data['feature_names']),
+             continuous_feature_names=tuple(schema_data['continuous_feature_names']),
+             categorical_feature_names=tuple(schema_data['categorical_feature_names']),
+             categorical_index_map=rehydrated_index_map,
+             categorical_mappings=schema_data['categorical_mappings']
+         )
+
+         config['schema'] = schema
+         # --- End Reconstruction ---
+
+         model = cls(**config)
+         if verbose:
+             _LOGGER.info(f"Successfully loaded architecture for '{saved_class_name}'")
+         return model
+
+     def __repr__(self) -> str:
+         internal_model_str = str(self.internal_model)
+         # Grab the first line of the internal model's repr
+         internal_repr = internal_model_str.split('\n')[0]
+         return f"{self.model_name}(internal_model={internal_repr})"
+
+
+ class DragonGateModel(_BasePytabWrapper):
+     """
+     Adapter for the Gated Additive Tree Ensemble (GATE) model from the 'pytorch_tabular' library.
+
+     GATE is a hybrid model that uses Gated Feature Learning Units (GFLUs) to
+     learn powerful feature representations. These learned features are then
+     fed into an additive ensemble of differentiable decision trees, combining
+     the representation learning of deep networks with the structured
+     decision-making of tree ensembles.
+     """
+     def __init__(self, *,
+                  schema: FeatureSchema,
+                  out_targets: int,
+                  embedding_dim: int = 32,
+                  gflu_stages: int = 6,
+                  num_trees: int = 20,
+                  tree_depth: int = 5,
+                  dropout: float = 0.1):
+         """
+         Args:
+             schema (FeatureSchema):
+                 The definitive schema object from data_exploration.
+             out_targets (int):
+                 Number of output targets.
+             embedding_dim (int):
+                 Dimension of the categorical embeddings. (Recommended: 16 to 64)
+             gflu_stages (int):
+                 Number of Gated Feature Learning Units (GFLU) stages. (Recommended: 2 to 6)
+             num_trees (int):
+                 Number of trees in the ensemble. (Recommended: 10 to 50)
+             tree_depth (int):
+                 Depth of each tree. (Recommended: 4 to 8)
+             dropout (float):
+                 Dropout rate for the GFLU.
+         """
+         super().__init__(schema)
+         self.model_name = "DragonGateModel"
+         self.out_targets = out_targets
+
+         # Store hparams for saving/loading
+         self.model_hparams = {
+             'embedding_dim': embedding_dim,
+             'gflu_stages': gflu_stages,
+             'num_trees': num_trees,
+             'tree_depth': tree_depth,
+             'dropout': dropout
+         }
+
+         # Build the minimal config for the GateModel
+         pt_config = self._build_pt_config(
+             out_targets=out_targets,
+             embedding_dim=embedding_dim,
+             gflu_stages=gflu_stages,
+             num_trees=num_trees,
+             tree_depth=tree_depth,
+             dropout=dropout,
+             # GATE-specific params
+             gflu_dropout=dropout,
+             chain_trees=False,
+         )
+
+         # Instantiate the internal pytorch_tabular model
+         self.internal_model = GatedAdditiveTreeEnsembleModel(config=pt_config)
+
+
+ class DragonNodeModel(_BasePytabWrapper):
+     """
+     Adapter for the Neural Oblivious Decision Ensembles (NODE) model from the 'pytorch_tabular' library.
+
+     NODE is a model based on an ensemble of differentiable 'oblivious'
+     decision trees. An oblivious tree uses the same splitting feature and
+     threshold across all nodes at the same depth. This structure, combined
+     with a differentiable formulation, allows the model to be trained
+     end-to-end with gradient descent, learning feature interactions and
+     splitting thresholds simultaneously.
+     """
+     def __init__(self, *,
+                  schema: FeatureSchema,
+                  out_targets: int,
+                  embedding_dim: int = 32,
+                  num_trees: int = 1024,
+                  tree_depth: int = 6,
+                  dropout: float = 0.1):
+         """
+         Args:
+             schema (FeatureSchema):
+                 The definitive schema object from data_exploration.
+             out_targets (int):
+                 Number of output targets.
+             embedding_dim (int):
+                 Dimension of the categorical embeddings. (Recommended: 16 to 64)
+             num_trees (int):
+                 Total number of trees in the ensemble. (Recommended: 256 to 2048)
+             tree_depth (int):
+                 Depth of each tree. (Recommended: 4 to 8)
+             dropout (float):
+                 Dropout rate.
+         """
+         super().__init__(schema)
+         self.model_name = "DragonNodeModel"
+         self.out_targets = out_targets
+
+         # Store hparams for saving/loading
+         self.model_hparams = {
+             'embedding_dim': embedding_dim,
+             'num_trees': num_trees,
+             'tree_depth': tree_depth,
+             'dropout': dropout
+         }
+
+         # Build the minimal config for the NodeModel
+         pt_config = self._build_pt_config(
+             out_targets=out_targets,
+             embedding_dim=embedding_dim,
+             num_trees=num_trees,
+             tree_depth=tree_depth,
+             # NODE-specific params
+             num_layers=1,  # NODE uses num_layers=1 for a single ensemble
+             total_trees=num_trees,
+             dropout_rate=dropout,
+         )
+
+         # Instantiate the internal pytorch_tabular model
+         self.internal_model = NODEModel(config=pt_config)
+
+
+ def info():
+     _script_info(__all__)
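
The wrappers added above accept one flat feature tensor and split it by column index before calling the underlying pytorch_tabular model. A minimal sketch of that split, using a hypothetical three-column layout (two continuous features plus one categorical feature at column 2), is:

    import torch

    categorical_indices = [2]                                  # from schema.categorical_index_map
    numerical_indices = sorted(set(range(3)) - set(categorical_indices))

    x = torch.tensor([[20.5, 1.2, 0.0],
                      [25.0, 0.9, 2.0]])                       # categorical column holds integer codes

    input_dict = {
        'continuous': x[:, numerical_indices].float(),         # shape (2, 2)
        'categorical': x[:, categorical_indices].long(),       # shape (2, 1)
    }
    # _BasePytabWrapper.forward() builds this dict, feeds it to the internal
    # model, and returns model_output_dict['logits'].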
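Constructing either wrapper only requires a FeatureSchema and the number of output targets; the remaining hyperparameters have defaults. The sketch below uses hypothetical feature names and assumes FeatureSchema accepts the same keyword arguments that load() uses to rebuild it; running it also requires pytorch_tabular and omegaconf to be installed.

    schema = FeatureSchema(
        feature_names=("temp", "pressure", "material"),
        continuous_feature_names=("temp", "pressure"),
        categorical_feature_names=("material",),
        categorical_index_map={2: 3},                          # column 2, cardinality 3
        categorical_mappings={"material": {"steel": 0, "copper": 1, "resin": 2}},
    )

    model = DragonGateModel(schema=schema, out_targets=1)      # DragonNodeModel is constructed the same way
    logits = model(x)                                          # x as in the previous sketch; shape (2, 1)

Whether the internal GatedAdditiveTreeEnsembleModel accepts this minimal DictConfig as-is depends on the installed pytorch_tabular version.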
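The load() classmethod rebuilds the FeatureSchema from a JSON architecture file, which is why it casts the keys of categorical_index_map back to int: json serialises integer dictionary keys as strings. A small self-contained illustration of that round trip:

    import json

    index_map = {2: 3, 5: 4}
    round_tripped = json.loads(json.dumps(index_map))
    print(round_tripped)                                       # {'2': 3, '5': 4} -- keys became strings
    rehydrated = {int(k): v for k, v in round_tripped.items()}
    print(rehydrated)                                          # {2: 3, 5: 4}

The same round trip turns the schema's tuples into lists, which is why load() wraps them back in tuple() before reconstructing the FeatureSchema.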