dragon-ml-toolbox 13.0.0__py3-none-any.whl → 14.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/METADATA +12 -2
- dragon_ml_toolbox-14.7.0.dist-info/RECORD +49 -0
- {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +10 -0
- ml_tools/MICE_imputation.py +207 -5
- ml_tools/ML_configuration.py +108 -0
- ml_tools/ML_datasetmaster.py +241 -260
- ml_tools/ML_evaluation.py +229 -76
- ml_tools/ML_evaluation_multi.py +45 -16
- ml_tools/ML_inference.py +0 -1
- ml_tools/ML_models.py +135 -55
- ml_tools/ML_models_advanced.py +323 -0
- ml_tools/ML_optimization.py +49 -36
- ml_tools/ML_trainer.py +498 -29
- ml_tools/ML_utilities.py +351 -4
- ml_tools/ML_vision_datasetmaster.py +1492 -0
- ml_tools/ML_vision_evaluation.py +260 -0
- ml_tools/ML_vision_inference.py +428 -0
- ml_tools/ML_vision_models.py +641 -0
- ml_tools/ML_vision_transformers.py +203 -0
- ml_tools/PSO_optimization.py +5 -1
- ml_tools/_ML_vision_recipe.py +88 -0
- ml_tools/__init__.py +1 -0
- ml_tools/_schema.py +96 -0
- ml_tools/custom_logger.py +37 -14
- ml_tools/data_exploration.py +576 -138
- ml_tools/ensemble_evaluation.py +53 -10
- ml_tools/keys.py +43 -1
- ml_tools/math_utilities.py +1 -1
- ml_tools/optimization_tools.py +65 -86
- ml_tools/serde.py +78 -17
- ml_tools/utilities.py +192 -3
- dragon_ml_toolbox-13.0.0.dist-info/RECORD +0 -41
- ml_tools/ML_simple_optimization.py +0 -413
- {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/licenses/LICENSE +0 -0
- {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/top_level.txt +0 -0
ml_tools/ML_models.py
CHANGED
```diff
@@ -8,6 +8,7 @@ from ._logger import _LOGGER
 from .path_manager import make_fullpath
 from ._script_info import _script_info
 from .keys import PytorchModelArchitectureKeys
+from ._schema import FeatureSchema
 
 
 __all__ = [
```
```diff
@@ -298,76 +299,73 @@ class TabularTransformer(nn.Module, _ArchitectureHandlerMixin):
     """
     A Transformer-based model for tabular data tasks.
 
-    This model uses a Feature Tokenizer to convert all input features into a
+    This model uses a Feature Tokenizer to convert all input features into a
+    sequence of embeddings, prepends a [CLS] token, and processes the
     sequence with a standard Transformer Encoder.
     """
     def __init__(self, *,
-
+                 schema: FeatureSchema,
                  out_targets: int,
-
-                 embedding_dim: int = 32,
+                 embedding_dim: int = 256,
                  num_heads: int = 8,
                  num_layers: int = 6,
-                 dropout: float = 0.
+                 dropout: float = 0.2):
         """
         Args:
-
-
-
-
-
-
-
-
-
-
-
+            schema (FeatureSchema):
+                The definitive schema object created by `data_exploration.finalize_feature_schema()`.
+            out_targets (int):
+                Number of output targets (1 for regression).
+            embedding_dim (int):
+                The dimension for all feature embeddings. Must be divisible by num_heads. Common values: (64, 128, 192, 256, etc.)
+            num_heads (int):
+                The number of heads in the multi-head attention mechanism. Common values: (4, 8, 16)
+            num_layers (int):
+                The number of sub-encoder-layers in the transformer encoder. Common values: (4, 8, 12)
+            dropout (float):
+                The dropout value.
+
+        ## Note:
 
-
-
-
-
-        **
-
-        their cardinality (the number of unique categories) via the `categorical_map` parameter.
-
-        **Ordinal & Binary Features** (e.g., 'Low/Medium/High', 'True/False'): Should be treated as **numerical**. Map them to numbers that
-        represent their state (e.g., `{'Low': 0, 'Medium': 1}` or `{False: 0, True: 1}`). Their column indices should **NOT** be included in the
-        `categorical_map` parameter.
+        **Embedding Dimension:** "Width" of the model. It's the N-dimension vector that will be used to represent each one of the features.
+        - Each continuous feature gets its own learnable N-dimension vector.
+        - Each categorical feature gets an embedding table that maps every category (e.g., "color=red", "color=blue") to a unique N-dimension vector.
+
+        **Attention Heads:** Controls the "Multi-Head Attention" mechanism. Instead of looking at all the feature interactions at once, the model splits its attention into N parallel heads.
+        - Embedding Dimensions get divided by the number of Attention Heads, resulting in the dimensions assigned per head.
 
-        **
+        **Number of Layers:** "Depth" of the model. Number of identical `TransformerEncoderLayer` blocks that are stacked on top of each other.
+        - Layer 1: The attention heads find simple, direct interactions between the features.
+        - Layer 2: Takes the output of Layer 1 and finds interactions between those interactions and so on.
+        - Trade-off: More layers are more powerful but are slower to train and more prone to overfitting. If the training loss goes down but the validation loss goes up, you might have too many layers (or need more dropout).
+
         """
         super().__init__()
 
+        # --- Get info from schema ---
+        in_features = len(schema.feature_names)
+        categorical_index_map = schema.categorical_index_map
+
         # --- Validation ---
-        if categorical_index_map and max(categorical_index_map.keys()) >= in_features:
+        if categorical_index_map and (max(categorical_index_map.keys()) >= in_features):
             _LOGGER.error(f"A categorical index ({max(categorical_index_map.keys())}) is out of bounds for the provided input features ({in_features}).")
             raise ValueError()
 
-        # --- Derive numerical indices ---
-        all_indices = set(range(in_features))
-        categorical_indices_set = set(categorical_index_map.keys())
-        numerical_indices = sorted(list(all_indices - categorical_indices_set))
-
         # --- Save configuration ---
-        self.
+        self.schema = schema # <-- Save the whole schema
         self.out_targets = out_targets
-        self.numerical_indices = numerical_indices
-        self.categorical_map = categorical_index_map
         self.embedding_dim = embedding_dim
         self.num_heads = num_heads
         self.num_layers = num_layers
         self.dropout = dropout
 
-        # --- 1. Feature Tokenizer ---
+        # --- 1. Feature Tokenizer (now takes the schema) ---
         self.tokenizer = _FeatureTokenizer(
-
-            categorical_map=categorical_index_map,
+            schema=schema,
             embedding_dim=embedding_dim
         )
 
         # --- 2. CLS Token ---
-        # A learnable token that will be prepended to the sequence.
         self.cls_token = nn.Parameter(torch.randn(1, 1, embedding_dim))
 
         # --- 3. Transformer Encoder ---
```
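For orientation, a minimal sketch of how the new schema-driven constructor would be called. The `FeatureSchema` field names are taken from the `load()` reconstruction further down in this diff; building one by hand like this is purely illustrative, since the pipeline normally produces it via `data_exploration.finalize_feature_schema()`:

```python
# Hypothetical 4-column layout: columns 2 and 3 are categorical,
# with cardinalities 3 and 5 respectively.
from ml_tools._schema import FeatureSchema
from ml_tools.ML_models import TabularTransformer

schema = FeatureSchema(
    feature_names=("age", "income", "color", "city"),
    continuous_feature_names=("age", "income"),
    categorical_feature_names=("color", "city"),
    categorical_index_map={2: 3, 3: 5},  # column index -> cardinality
    categorical_mappings={
        "color": {"red": 0, "green": 1, "blue": 2},
        "city": {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4},
    },
)

# embedding_dim must be divisible by num_heads: 256 / 8 = 32 dims per head.
model = TabularTransformer(
    schema=schema,
    out_targets=1,  # regression
    embedding_dim=256,
    num_heads=8,
    num_layers=6,
    dropout=0.2,
)
```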
```diff
@@ -416,21 +414,87 @@ class TabularTransformer(nn.Module, _ArchitectureHandlerMixin):
 
     def get_architecture_config(self) -> Dict[str, Any]:
         """Returns the full configuration of the model."""
+        # Deconstruct schema into a JSON-friendly dict
+        # Tuples are saved as lists
+        schema_dict = {
+            'feature_names': self.schema.feature_names,
+            'continuous_feature_names': self.schema.continuous_feature_names,
+            'categorical_feature_names': self.schema.categorical_feature_names,
+            'categorical_index_map': self.schema.categorical_index_map,
+            'categorical_mappings': self.schema.categorical_mappings
+        }
+
         return {
-            '
+            'schema_dict': schema_dict,
             'out_targets': self.out_targets,
-            'categorical_map': self.categorical_map,
             'embedding_dim': self.embedding_dim,
             'num_heads': self.num_heads,
             'num_layers': self.num_layers,
             'dropout': self.dropout
         }
+
+    @classmethod
+    def load(cls: type, file_or_dir: Union[str, Path], verbose: bool = True) -> nn.Module:
+        """Loads a model architecture from a JSON file."""
+        user_path = make_fullpath(file_or_dir)
+
+        if user_path.is_dir():
+            json_filename = PytorchModelArchitectureKeys.SAVENAME + ".json"
+            target_path = make_fullpath(user_path / json_filename, enforce="file")
+        elif user_path.is_file():
+            target_path = user_path
+        else:
+            _LOGGER.error(f"Invalid path: '{file_or_dir}'")
+            raise IOError()
+
+        with open(target_path, 'r') as f:
+            saved_data = json.load(f)
+
+        saved_class_name = saved_data[PytorchModelArchitectureKeys.MODEL]
+        config = saved_data[PytorchModelArchitectureKeys.CONFIG]
+
+        if saved_class_name != cls.__name__:
+            _LOGGER.error(f"Model class mismatch. File specifies '{saved_class_name}', but '{cls.__name__}' was expected.")
+            raise ValueError()
+
+        # --- RECONSTRUCTION LOGIC ---
+        if 'schema_dict' not in config:
+            _LOGGER.error("Invalid architecture file: missing 'schema_dict'. This file may be from an older version.")
+            raise ValueError("Missing 'schema_dict' in config.")
+
+        schema_data = config.pop('schema_dict')
+
+        # Re-hydrate the categorical_index_map
+        # JSON saves all dict keys as strings, so we must convert them back to int.
+        raw_index_map = schema_data['categorical_index_map']
+        if raw_index_map is not None:
+            rehydrated_index_map = {int(k): v for k, v in raw_index_map.items()}
+        else:
+            rehydrated_index_map = None
+
+        # Re-hydrate the FeatureSchema object
+        # JSON deserializes tuples as lists, so we must convert them back.
+        schema = FeatureSchema(
+            feature_names=tuple(schema_data['feature_names']),
+            continuous_feature_names=tuple(schema_data['continuous_feature_names']),
+            categorical_feature_names=tuple(schema_data['categorical_feature_names']),
+            categorical_index_map=rehydrated_index_map,
+            categorical_mappings=schema_data['categorical_mappings']
+        )
+
+        config['schema'] = schema
+        # --- End Reconstruction ---
+
+        model = cls(**config)
+        if verbose:
+            _LOGGER.info(f"Successfully loaded architecture for '{saved_class_name}'")
+        return model
 
     def __repr__(self) -> str:
         """Returns the developer-friendly string representation of the model."""
         # Build the architecture string part-by-part
         parts = [
-            f"Tokenizer(features={self.
+            f"Tokenizer(features={len(self.schema.feature_names)}, dim={self.embedding_dim})",
             "[CLS]",
             f"TransformerEncoder(layers={self.num_layers}, heads={self.num_heads})",
             f"PredictionHead(outputs={self.out_targets})"
```
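The re-hydration steps in `load()` exist because JSON round-trips are lossy in exactly two ways: object keys always come back as strings, and tuples come back as lists. A self-contained sketch of the problem and the same conversions the method applies:

```python
import json

payload = {
    "categorical_index_map": {2: 3, 3: 5},                 # int keys
    "feature_names": ("age", "income", "color", "city"),   # tuple
}
round_tripped = json.loads(json.dumps(payload))

print(round_tripped["categorical_index_map"])  # {'2': 3, '3': 5}  (str keys now)
print(round_tripped["feature_names"])          # ['age', 'income', ...]  (list now)

# The conversions load() performs to rebuild the FeatureSchema:
index_map = {int(k): v for k, v in round_tripped["categorical_index_map"].items()}
feature_names = tuple(round_tripped["feature_names"])
```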
```diff
@@ -443,29 +507,41 @@ class TabularTransformer(nn.Module, _ArchitectureHandlerMixin):
 
 class _FeatureTokenizer(nn.Module):
     """
-    Transforms raw numerical and categorical features from any column order
+    Transforms raw numerical and categorical features from any column order
+    into a sequence of embeddings.
     """
     def __init__(self,
-
-                 categorical_map: Dict[int, int],
+                 schema: FeatureSchema,
                  embedding_dim: int):
         """
         Args:
-
-
-            embedding_dim (int):
+            schema (FeatureSchema):
+                The definitive schema object from data_exploration.
+            embedding_dim (int):
+                The dimension for all feature embeddings.
         """
         super().__init__()
 
-        #
-
-
+        # --- Get info from schema ---
+        categorical_map = schema.categorical_index_map
+
+        if categorical_map:
+            # Unpack the dictionary into separate lists
+            self.categorical_indices = list(categorical_map.keys())
+            cardinalities = list(categorical_map.values())
+        else:
+            self.categorical_indices = []
+            cardinalities = []
+
+        # Derive numerical indices by finding what's not categorical
+        all_indices = set(range(len(schema.feature_names)))
+        categorical_indices_set = set(self.categorical_indices)
+        self.numerical_indices = sorted(list(all_indices - categorical_indices_set))
 
-        self.numerical_indices = numerical_indices
         self.embedding_dim = embedding_dim
 
         # A learnable embedding for each numerical feature
-        self.numerical_embeddings = nn.Parameter(torch.randn(len(numerical_indices), embedding_dim))
+        self.numerical_embeddings = nn.Parameter(torch.randn(len(self.numerical_indices), embedding_dim))
 
         # A standard embedding layer for each categorical feature
         self.categorical_embeddings = nn.ModuleList(
```
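The index bookkeeping that previously lived in `TabularTransformer.__init__` now lives inside the tokenizer: numerical columns are simply every column not named in the schema's `categorical_index_map`. A small sketch of that derivation and the resulting embedding shapes (toy column layout, assumed for illustration):

```python
import torch
from torch import nn

categorical_map = {2: 3, 3: 5}  # column index -> cardinality
n_features = 6

numerical_indices = sorted(set(range(n_features)) - set(categorical_map))
print(numerical_indices)  # [0, 1, 4, 5]

# As in the constructor above: one learnable vector per numerical feature,
# one embedding table per categorical feature, sized by its cardinality.
embedding_dim = 32
numerical_embeddings = nn.Parameter(torch.randn(len(numerical_indices), embedding_dim))
categorical_embeddings = nn.ModuleList(
    nn.Embedding(cardinality, embedding_dim) for cardinality in categorical_map.values()
)
```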
```diff
@@ -487,6 +563,8 @@ class _FeatureTokenizer(nn.Module):
         # Process categorical features
         categorical_tokens = []
         for i, embed_layer in enumerate(self.categorical_embeddings):
+            # x_categorical[:, i] selects the i-th categorical column
+            # (e.g., all values for the 'color' feature)
             token = embed_layer(x_categorical[:, i]).unsqueeze(1)
             categorical_tokens.append(token)
 
```
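A quick shape walk-through of the loop the new comments describe (toy sizes, illustrative only): each column of category ids becomes one sequence position of embeddings.

```python
import torch
from torch import nn

# Batch of 4, one categorical feature with cardinality 5, embedding_dim 32.
embed_layer = nn.Embedding(num_embeddings=5, embedding_dim=32)
x_categorical = torch.randint(0, 5, (4, 1))  # (batch, n_categorical_features)

token = embed_layer(x_categorical[:, 0])  # (4, 32)
token = token.unsqueeze(1)                # (4, 1, 32): one sequence position
print(token.shape)                        # torch.Size([4, 1, 32])
```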
```diff
@@ -670,5 +748,7 @@ class SequencePredictorLSTM(nn.Module, _ArchitectureHandlerMixin):
         )
 
 
+# ---- PyTorch models ---
+
 def info():
     _script_info(__all__)
```
ml_tools/ML_models_advanced.py
ADDED
```diff
@@ -0,0 +1,323 @@
+import torch
+from torch import nn
+from typing import Union, Dict, Any
+from pathlib import Path
+import json
+
+from ._logger import _LOGGER
+from .path_manager import make_fullpath
+from .keys import PytorchModelArchitectureKeys
+from ._schema import FeatureSchema
+from ._script_info import _script_info
+from .ML_models import _ArchitectureHandlerMixin
+
+# Imports from pytorch_tabular
+try:
+    from omegaconf import DictConfig
+    from pytorch_tabular.models import GatedAdditiveTreeEnsembleModel, NODEModel
+except ImportError:
+    _LOGGER.error(f"GATE and NODE require 'pip install pytorch_tabular omegaconf' dependencies.")
+    raise ImportError()
+
+
+__all__ = [
+    "DragonGateModel",
+    "DragonNodeModel",
+]
+
+
+class _BasePytabWrapper(nn.Module, _ArchitectureHandlerMixin):
+    """
+    Internal Base Class: Do not use directly.
+
+    This is an adapter to make pytorch_tabular models compatible with the
+    dragon-ml-toolbox pipeline.
+
+    It handles:
+    1. Schema-based initialization.
+    2. Single-tensor forward pass, which is then split into the
+       dict {'continuous': ..., 'categorical': ...} that pytorch_tabular expects.
+    3. Saving/Loading architecture using the pipeline's _ArchitectureHandlerMixin.
+    """
+    def __init__(self, schema: FeatureSchema):
+        super().__init__()
+
+        self.schema = schema
+        self.model_name = "Base" # To be overridden by child
+        self.internal_model: nn.Module = None # type: ignore # To be set by child
+        self.model_hparams: Dict = dict() # To be set by child
+
+        # --- Derive indices from schema ---
+        categorical_map = schema.categorical_index_map
+
+        if categorical_map:
+            # The order of keys/values is implicitly linked and must be preserved
+            self.categorical_indices = list(categorical_map.keys())
+            self.cardinalities = list(categorical_map.values())
+        else:
+            self.categorical_indices = []
+            self.cardinalities = []
+
+        # Derive numerical indices by finding what's not categorical
+        all_indices = set(range(len(schema.feature_names)))
+        categorical_indices_set = set(self.categorical_indices)
+        self.numerical_indices = sorted(list(all_indices - categorical_indices_set))
+
+    def _build_pt_config(self, out_targets: int, **kwargs) -> DictConfig:
+        """Helper to create the minimal config dict for a pytorch_tabular model."""
+        # 'regression' is the most neutral for model architecture. The final output_dim is what truly matters.
+        task = "regression"
+
+        config_dict = {
+            # --- Data / Schema Params ---
+            'task': task,
+            'continuous_cols': list(self.schema.continuous_feature_names),
+            'categorical_cols': list(self.schema.categorical_feature_names),
+            'continuous_dim': len(self.numerical_indices),
+            'categorical_dim': len(self.categorical_indices),
+            'categorical_cardinality': self.cardinalities,
+            'target': ['dummy_target'], # Required, but not used
+
+            # --- Model Params ---
+            'output_dim': out_targets,
+            **kwargs
+        }
+
+        # Add common params that most models need
+        if 'loss' not in config_dict:
+            config_dict['loss'] = 'NotUsed'
+        if 'metrics' not in config_dict:
+            config_dict['metrics'] = []
+
+        return DictConfig(config_dict)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """
+        Accepts a single tensor and converts it to the dict
+        that pytorch_tabular models expect.
+        """
+        # 1. Split the single tensor input
+        x_cont = x[:, self.numerical_indices].float()
+        x_cat = x[:, self.categorical_indices].long()
+
+        # 2. Create the input dict
+        input_dict = {
+            'continuous': x_cont,
+            'categorical': x_cat
+        }
+
+        # 3. Pass to the internal pytorch_tabular model
+        # The model returns a dict, we extract the logits
+        model_output_dict = self.internal_model(input_dict)
+
+        # 4. Return the logits tensor
+        return model_output_dict['logits']
+
+    def get_architecture_config(self) -> Dict[str, Any]:
+        """Returns the full configuration of the model."""
+        # Deconstruct schema into a JSON-friendly dict
+        schema_dict = {
+            'feature_names': self.schema.feature_names,
+            'continuous_feature_names': self.schema.continuous_feature_names,
+            'categorical_feature_names': self.schema.categorical_feature_names,
+            'categorical_index_map': self.schema.categorical_index_map,
+            'categorical_mappings': self.schema.categorical_mappings
+        }
+
+        config = {
+            'schema_dict': schema_dict,
+            'out_targets': self.out_targets,
+            **self.model_hparams
+        }
+        return config
+
+    @classmethod
+    def load(cls: type, file_or_dir: Union[str, Path], verbose: bool = True) -> nn.Module:
+        """Loads a model architecture from a JSON file."""
+        user_path = make_fullpath(file_or_dir)
+
+        if user_path.is_dir():
+            json_filename = PytorchModelArchitectureKeys.SAVENAME + ".json"
+            target_path = make_fullpath(user_path / json_filename, enforce="file")
+        elif user_path.is_file():
+            target_path = user_path
+        else:
+            _LOGGER.error(f"Invalid path: '{file_or_dir}'")
+            raise IOError()
+
+        with open(target_path, 'r') as f:
+            saved_data = json.load(f)
+
+        saved_class_name = saved_data[PytorchModelArchitectureKeys.MODEL]
+        config = saved_data[PytorchModelArchitectureKeys.CONFIG]
+
+        if saved_class_name != cls.__name__:
+            _LOGGER.error(f"Model class mismatch. File specifies '{saved_class_name}', but '{cls.__name__}' was expected.")
+            raise ValueError()
+
+        # --- RECONSTRUCTION LOGIC ---
+        if 'schema_dict' not in config:
+            _LOGGER.error("Invalid architecture file: missing 'schema_dict'. This file may be from an older version.")
+            raise ValueError("Missing 'schema_dict' in config.")
+
+        schema_data = config.pop('schema_dict')
+
+        # JSON saves all dict keys as strings, convert them back to int.
+        raw_index_map = schema_data['categorical_index_map']
+        if raw_index_map is not None:
+            rehydrated_index_map = {int(k): v for k, v in raw_index_map.items()}
+        else:
+            rehydrated_index_map = None
+
+        # JSON deserializes tuples as lists, convert them back.
+        schema = FeatureSchema(
+            feature_names=tuple(schema_data['feature_names']),
+            continuous_feature_names=tuple(schema_data['continuous_feature_names']),
+            categorical_feature_names=tuple(schema_data['categorical_feature_names']),
+            categorical_index_map=rehydrated_index_map,
+            categorical_mappings=schema_data['categorical_mappings']
+        )
+
+        config['schema'] = schema
+        # --- End Reconstruction ---
+
+        model = cls(**config)
+        if verbose:
+            _LOGGER.info(f"Successfully loaded architecture for '{saved_class_name}'")
+        return model
+
+    def __repr__(self) -> str:
+        internal_model_str = str(self.internal_model)
+        # Grab the first line of the internal model's repr
+        internal_repr = internal_model_str.split('\n')[0]
+        return f"{self.model_name}(internal_model={internal_repr})"
+
+
```
|
+
class DragonGateModel(_BasePytabWrapper):
|
|
197
|
+
"""
|
|
198
|
+
Adapter for the Gated Additive Tree Ensemble (GATE) model from the 'pytorch_tabular' library.
|
|
199
|
+
|
|
200
|
+
GATE is a hybrid model that uses Gated Feature Learning Units (GFLUs) to
|
|
201
|
+
learn powerful feature representations. These learned features are then
|
|
202
|
+
fed into an additive ensemble of differentiable decision trees, combining
|
|
203
|
+
the representation learning of deep networks with the structured
|
|
204
|
+
decision-making of tree ensembles.
|
|
205
|
+
"""
|
|
206
|
+
def __init__(self, *,
|
|
207
|
+
schema: FeatureSchema,
|
|
208
|
+
out_targets: int,
|
|
209
|
+
embedding_dim: int = 32,
|
|
210
|
+
gflu_stages: int = 6,
|
|
211
|
+
num_trees: int = 20,
|
|
212
|
+
tree_depth: int = 5,
|
|
213
|
+
dropout: float = 0.1):
|
|
214
|
+
"""
|
|
215
|
+
Args:
|
|
216
|
+
schema (FeatureSchema):
|
|
217
|
+
The definitive schema object from data_exploration.
|
|
218
|
+
out_targets (int):
|
|
219
|
+
Number of output targets.
|
|
220
|
+
embedding_dim (int):
|
|
221
|
+
Dimension of the categorical embeddings. (Recommended: 16 to 64)
|
|
222
|
+
gflu_stages (int):
|
|
223
|
+
Number of Gated Feature Learning Units (GFLU) stages. (Recommended: 2 to 6)
|
|
224
|
+
num_trees (int):
|
|
225
|
+
Number of trees in the ensemble. (Recommended: 10 to 50)
|
|
226
|
+
tree_depth (int):
|
|
227
|
+
Depth of each tree. (Recommended: 4 to 8)
|
|
228
|
+
dropout (float):
|
|
229
|
+
Dropout rate for the GFLU.
|
|
230
|
+
"""
|
|
231
|
+
super().__init__(schema)
|
|
232
|
+
self.model_name = "DragonGateModel"
|
|
233
|
+
self.out_targets = out_targets
|
|
234
|
+
|
|
235
|
+
# Store hparams for saving/loading
|
|
236
|
+
self.model_hparams = {
|
|
237
|
+
'embedding_dim': embedding_dim,
|
|
238
|
+
'gflu_stages': gflu_stages,
|
|
239
|
+
'num_trees': num_trees,
|
|
240
|
+
'tree_depth': tree_depth,
|
|
241
|
+
'dropout': dropout
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
# Build the minimal config for the GateModel
|
|
245
|
+
pt_config = self._build_pt_config(
|
|
246
|
+
out_targets=out_targets,
|
|
247
|
+
embedding_dim=embedding_dim,
|
|
248
|
+
gflu_stages=gflu_stages,
|
|
249
|
+
num_trees=num_trees,
|
|
250
|
+
tree_depth=tree_depth,
|
|
251
|
+
dropout=dropout,
|
|
252
|
+
# GATE-specific params
|
|
253
|
+
gflu_dropout=dropout,
|
|
254
|
+
chain_trees=False,
|
|
255
|
+
)
|
|
256
|
+
|
|
257
|
+
# Instantiate the internal pytorch_tabular model
|
|
258
|
+
self.internal_model = GatedAdditiveTreeEnsembleModel(config=pt_config)
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
class DragonNodeModel(_BasePytabWrapper):
|
|
262
|
+
"""
|
|
263
|
+
Adapter for the Neural Oblivious Decision Ensembles (NODE) model from the 'pytorch_tabular' library.
|
|
264
|
+
|
|
265
|
+
NODE is a model based on an ensemble of differentiable 'oblivious'
|
|
266
|
+
decision trees. An oblivious tree uses the same splitting feature and
|
|
267
|
+
threshold across all nodes at the same depth. This structure, combined
|
|
268
|
+
with a differentiable formulation, allows the model to be trained
|
|
269
|
+
end-to-end with gradient descent, learning feature interactions and
|
|
270
|
+
splitting thresholds simultaneously.
|
|
271
|
+
"""
|
|
272
|
+
def __init__(self, *,
|
|
273
|
+
schema: FeatureSchema,
|
|
274
|
+
out_targets: int,
|
|
275
|
+
embedding_dim: int = 32,
|
|
276
|
+
num_trees: int = 1024,
|
|
277
|
+
tree_depth: int = 6,
|
|
278
|
+
dropout: float = 0.1):
|
|
279
|
+
"""
|
|
280
|
+
Args:
|
|
281
|
+
schema (FeatureSchema):
|
|
282
|
+
The definitive schema object from data_exploration.
|
|
283
|
+
out_targets (int):
|
|
284
|
+
Number of output targets.
|
|
285
|
+
embedding_dim (int):
|
|
286
|
+
Dimension of the categorical embeddings. (Recommended: 16 to 64)
|
|
287
|
+
num_trees (int):
|
|
288
|
+
Total number of trees in the ensemble. (Recommended: 256 to 2048)
|
|
289
|
+
tree_depth (int):
|
|
290
|
+
Depth of each tree. (Recommended: 4 to 8)
|
|
291
|
+
dropout (float):
|
|
292
|
+
Dropout rate.
|
|
293
|
+
"""
|
|
294
|
+
super().__init__(schema)
|
|
295
|
+
self.model_name = "DragonNodeModel"
|
|
296
|
+
self.out_targets = out_targets
|
|
297
|
+
|
|
298
|
+
# Store hparams for saving/loading
|
|
299
|
+
self.model_hparams = {
|
|
300
|
+
'embedding_dim': embedding_dim,
|
|
301
|
+
'num_trees': num_trees,
|
|
302
|
+
'tree_depth': tree_depth,
|
|
303
|
+
'dropout': dropout
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
# Build the minimal config for the NodeModel
|
|
307
|
+
pt_config = self._build_pt_config(
|
|
308
|
+
out_targets=out_targets,
|
|
309
|
+
embedding_dim=embedding_dim,
|
|
310
|
+
num_trees=num_trees,
|
|
311
|
+
tree_depth=tree_depth,
|
|
312
|
+
# NODE-specific params
|
|
313
|
+
num_layers=1, # NODE uses num_layers=1 for a single ensemble
|
|
314
|
+
total_trees=num_trees,
|
|
315
|
+
dropout_rate=dropout,
|
|
316
|
+
)
|
|
317
|
+
|
|
318
|
+
# Instantiate the internal pytorch_tabular model
|
|
319
|
+
self.internal_model = NODEModel(config=pt_config)
|
|
320
|
+
|
|
321
|
+
|
|
322
|
+
def info():
|
|
323
|
+
_script_info(__all__)
|