autogluon.tabular 1.2.1b20250407__py3-none-any.whl → 1.2.1b20250409__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- autogluon/tabular/register/_ag_model_register.py +0 -2
- autogluon/tabular/version.py +1 -1
- {autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/METADATA +13 -13
- {autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/RECORD +11 -22
- autogluon/tabular/models/tab_transformer/__init__.py +0 -1
- autogluon/tabular/models/tab_transformer/hyperparameters/__init__.py +0 -1
- autogluon/tabular/models/tab_transformer/hyperparameters/parameters.py +0 -66
- autogluon/tabular/models/tab_transformer/hyperparameters/searchspaces.py +0 -17
- autogluon/tabular/models/tab_transformer/modified_transformer.py +0 -494
- autogluon/tabular/models/tab_transformer/pretexts.py +0 -150
- autogluon/tabular/models/tab_transformer/tab_model_base.py +0 -86
- autogluon/tabular/models/tab_transformer/tab_transformer.py +0 -183
- autogluon/tabular/models/tab_transformer/tab_transformer_encoder.py +0 -668
- autogluon/tabular/models/tab_transformer/tab_transformer_model.py +0 -540
- autogluon/tabular/models/tab_transformer/utils.py +0 -124
- /autogluon.tabular-1.2.1b20250407-py3.9-nspkg.pth → /autogluon.tabular-1.2.1b20250409-py3.9-nspkg.pth +0 -0
- {autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/LICENSE +0 -0
- {autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/NOTICE +0 -0
- {autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/WHEEL +0 -0
- {autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/namespace_packages.txt +0 -0
- {autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/top_level.txt +0 -0
- {autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/zip-safe +0 -0
autogluon/tabular/register/_ag_model_register.py
CHANGED
@@ -28,7 +28,6 @@ from ..models import (
     XGBoostModel,
     XTModel,
 )
-from ..models.tab_transformer.tab_transformer_model import TabTransformerModel
 
 
 # When adding a new model officially to AutoGluon, the model class should be added to the bottom of this list.
@@ -42,7 +41,6 @@ REGISTERED_MODEL_CLS_LST = [
     TabularNeuralNetTorchModel,
     LinearModel,
     NNFastAiTabularModel,
-    TabTransformerModel,
     TextPredictorModel,
     ImagePredictorModel,
     MultiModalPredictorModel,
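Downstream, this removal means TabTransformer can no longer be requested from the model registry. A minimal sketch of the effect, assuming AutoGluon's stable TabularPredictor.fit API ("TRANSF" was TabTransformer's historical hyperparameters key; the tiny DataFrame is illustrative only):

import pandas as pd
from autogluon.tabular import TabularPredictor

# Tiny synthetic dataset, just to exercise the model registry.
train_data = pd.DataFrame({"x": list(range(100)), "target": [i % 2 for i in range(100)]})

predictor = TabularPredictor(label="target").fit(
    train_data,
    hyperparameters={
        "GBM": {},       # LightGBM: still a registered model
        # "TRANSF": {},  # TabTransformer's historical key; its model class is gone as of this build
    },
)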
autogluon/tabular/version.py
CHANGED
{autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.tabular
-Version: 1.2.1b20250407
+Version: 1.2.1b20250409
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -41,32 +41,32 @@ Requires-Dist: scipy<1.16,>=1.5.4
 Requires-Dist: pandas<2.3.0,>=2.0.0
 Requires-Dist: scikit-learn<1.5.3,>=1.4.0
 Requires-Dist: networkx<4,>=3.0
-Requires-Dist: autogluon.core==1.2.1b20250407
-Requires-Dist: autogluon.features==1.2.1b20250407
+Requires-Dist: autogluon.core==1.2.1b20250409
+Requires-Dist: autogluon.features==1.2.1b20250409
 Provides-Extra: all
+Requires-Dist: numpy<2.0.0,>=1.25; extra == "all"
+Requires-Dist: lightgbm<4.7,>=4.0; extra == "all"
 Requires-Dist: catboost<1.3,>=1.2; extra == "all"
+Requires-Dist: torch<2.7,>=2.2; extra == "all"
 Requires-Dist: fastai<2.9,>=2.3.1; extra == "all"
+Requires-Dist: autogluon.core[all]==1.2.1b20250409; extra == "all"
+Requires-Dist: huggingface-hub[torch]; extra == "all"
+Requires-Dist: xgboost<2.2,>=2.0; extra == "all"
 Requires-Dist: einops<0.9,>=0.7; extra == "all"
 Requires-Dist: spacy<3.8; extra == "all"
-Requires-Dist: lightgbm<4.7,>=4.0; extra == "all"
-Requires-Dist: torch<2.6,>=2.2; extra == "all"
-Requires-Dist: xgboost<2.2,>=2.0; extra == "all"
-Requires-Dist: huggingface-hub[torch]; extra == "all"
-Requires-Dist: autogluon.core[all]==1.2.1b20250407; extra == "all"
-Requires-Dist: numpy<2.0.0,>=1.25; extra == "all"
 Provides-Extra: catboost
 Requires-Dist: numpy<2.0.0,>=1.25; extra == "catboost"
 Requires-Dist: catboost<1.3,>=1.2; extra == "catboost"
 Provides-Extra: fastai
 Requires-Dist: spacy<3.8; extra == "fastai"
-Requires-Dist: torch<2.6,>=2.2; extra == "fastai"
+Requires-Dist: torch<2.7,>=2.2; extra == "fastai"
 Requires-Dist: fastai<2.9,>=2.3.1; extra == "fastai"
 Provides-Extra: imodels
 Requires-Dist: imodels<1.4.0,>=1.3.10; extra == "imodels"
 Provides-Extra: lightgbm
 Requires-Dist: lightgbm<4.7,>=4.0; extra == "lightgbm"
 Provides-Extra: ray
-Requires-Dist: autogluon.core[all]==1.2.1b20250407; extra == "ray"
+Requires-Dist: autogluon.core[all]==1.2.1b20250409; extra == "ray"
 Provides-Extra: skex
 Requires-Dist: scikit-learn-intelex<2025.1,>=2024.0; extra == "skex"
 Provides-Extra: skl2onnx
@@ -78,11 +78,11 @@ Requires-Dist: onnx<1.16.2,>=1.13.0; platform_system == "Windows" and extra == "
 Provides-Extra: tabpfn
 Requires-Dist: tabpfn<0.2,>=0.1.11; extra == "tabpfn"
 Provides-Extra: tabpfnmix
-Requires-Dist: torch<2.6,>=2.2; extra == "tabpfnmix"
+Requires-Dist: torch<2.7,>=2.2; extra == "tabpfnmix"
 Requires-Dist: huggingface-hub[torch]; extra == "tabpfnmix"
 Requires-Dist: einops<0.9,>=0.7; extra == "tabpfnmix"
 Provides-Extra: tests
-Requires-Dist: torch<2.6,>=2.2; extra == "tests"
+Requires-Dist: torch<2.7,>=2.2; extra == "tests"
 Requires-Dist: huggingface-hub[torch]; extra == "tests"
 Requires-Dist: einops<0.9,>=0.7; extra == "tests"
 Requires-Dist: imodels<1.4.0,>=1.3.10; extra == "tests"
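The recurring change in this METADATA diff is the torch ceiling moving from <2.6 to <2.7 across the all, fastai, tabpfnmix, and tests extras. A quick way to confirm what a specifier admits, using the standard packaging library (the version 2.6.0 probe is just an example):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet(">=2.2,<2.6")
new_pin = SpecifierSet(">=2.2,<2.7")

print(Version("2.6.0") in old_pin)  # False: torch 2.6 was previously excluded
print(Version("2.6.0") in new_pin)  # True: the widened pin now admits it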
{autogluon.tabular-1.2.1b20250407.dist-info → autogluon.tabular-1.2.1b20250409.dist-info}/RECORD
RENAMED
@@ -1,6 +1,6 @@
-autogluon.tabular-1.2.
+autogluon.tabular-1.2.1b20250409-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
 autogluon/tabular/__init__.py,sha256=2OXpJCvENRHubBTYNIPpHX93WWuFZzsJBtTZbNVHVas,400
-autogluon/tabular/version.py,sha256=
+autogluon/tabular/version.py,sha256=A9BqrXhLrOJhH5qPbhMof-jLGxRS5nRGGDYPGWeziJE,91
 autogluon/tabular/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/configs/config_helper.py,sha256=Pb2aW9Z9w77pYKPRVZ3nBzHY3KJaiEJSJ747zZcJIVk,21132
 autogluon/tabular/configs/feature_generator_presets.py,sha256=EV5Ym8VW15q92MwOUpTi7wZFS2QooM51fLg3RdUsn-M,1223
@@ -74,17 +74,6 @@ autogluon/tabular/models/rf/rf_rapids_model.py,sha256=3s-8M11dzCl_2Lu5iB3H8YjHLg
 autogluon/tabular/models/rf/compilers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/rf/compilers/native.py,sha256=HhaqQRkVuf9UEEJPsHcdYCmuWBMYtyqRwwB_N2qxG2M,1313
 autogluon/tabular/models/rf/compilers/onnx.py,sha256=pvaZWdl2JJaE2pFU0mFugzhnybePqe0x1-5oLOvogA0,4318
-autogluon/tabular/models/tab_transformer/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon/tabular/models/tab_transformer/modified_transformer.py,sha256=3UNhZqKChkWCPtSP4YP23JhtggLqa4mLlP__jvxfBko,22894
-autogluon/tabular/models/tab_transformer/pretexts.py,sha256=UEoDq_8hLKbY7EbE5IyL_gUXpuQ607XTtS-jKqf8j8U,6564
-autogluon/tabular/models/tab_transformer/tab_model_base.py,sha256=4rmY1IrwoFuJejy-9gOoYSz-ar3DvZY8uXyDUBKk7Iw,3615
-autogluon/tabular/models/tab_transformer/tab_transformer.py,sha256=1c1oTJfSsGxQjzZJVN8doqFmYV-Wwwbqcu7RcW77kJk,6991
-autogluon/tabular/models/tab_transformer/tab_transformer_encoder.py,sha256=v2G1S_MSESzKqtvSfxS5uEse2CWtOn_K2E-uIwuE6zI,24701
-autogluon/tabular/models/tab_transformer/tab_transformer_model.py,sha256=5J3xeIWEoSMljJ-ghMeRmHv0sFqY3ASJ2aN7ivOIqww,22917
-autogluon/tabular/models/tab_transformer/utils.py,sha256=rrNk0X6Y0vzt7ivylRmTf0zjDx1DIF-5_ibf6B9Taz8,4554
-autogluon/tabular/models/tab_transformer/hyperparameters/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon/tabular/models/tab_transformer/hyperparameters/parameters.py,sha256=-vJRG8PVj5FgQnF9FNJHMvoIzzyazGE4XLRyQKL5VT8,3854
-autogluon/tabular/models/tab_transformer/hyperparameters/searchspaces.py,sha256=poiOFwOVIf1ONcPIjOqsA31YbqBgWxy0DlVFpVwKNHM,650
 autogluon/tabular/models/tabpfn/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 autogluon/tabular/models/tabpfn/tabpfn_model.py,sha256=PEYMuIh5TFLIDy3hcjfz1DcvDu77rbwRq0pKWyuUR04,6787
 autogluon/tabular/models/tabpfnmix/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -143,7 +132,7 @@ autogluon/tabular/predictor/__init__.py,sha256=zCMgjxQlWpDWnr1l1xjBCiK3rWC3N3RoD
 autogluon/tabular/predictor/interpretable_predictor.py,sha256=5UeKgnMFsfY65tiO3kxfHBPr03lyswLrgdtjPhI0Y7Q,6934
 autogluon/tabular/predictor/predictor.py,sha256=jOkpypHAPrL2nsI4iypVkZV90TpMORK-G_Ixr3Kw3XQ,357182
 autogluon/tabular/register/__init__.py,sha256=7CLOTWIUho0wi4eAwhYJ5Y0PfvNCWKnRwlw3bwYoTNE,93
-autogluon/tabular/register/_ag_model_register.py,sha256=
+autogluon/tabular/register/_ag_model_register.py,sha256=afDg51h07vImG8p2YZvzT5IT1lkpti4m2n92FhbDcfw,1414
 autogluon/tabular/register/_model_register.py,sha256=jqSg0d89dXAAcp-OT4II90ce994ByKMMzAYmpkyaRbI,6824
 autogluon/tabular/testing/__init__.py,sha256=XrEGLmMdmRT6QHNR13M9wna57LO4O3Q4tt27Ca8omAc,79
 autogluon/tabular/testing/fit_helper.py,sha256=gVHTdAsp_lSZ_qbwjXM7aA5fI32zHj3_zXwEXC9C_ds,19586
@@ -157,11 +146,11 @@ autogluon/tabular/trainer/model_presets/presets.py,sha256=bTPGPyz07a7GG6327yO6ry
 autogluon/tabular/trainer/model_presets/presets_distill.py,sha256=MnFC2GJc6RmDBNAGbsO2XMfo3PjR8cUrZoilWW8gTYQ,3295
 autogluon/tabular/tuning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/tuning/feature_pruner.py,sha256=9iNku8gVbYEkjuKlyITPJDicsNkoraaQOlINQq9iZlQ,6877
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
+autogluon.tabular-1.2.1b20250409.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.tabular-1.2.1b20250409.dist-info/METADATA,sha256=iDCe-gsgrr9MAg1N87btX4Zb2DQUAO-v1k14CK-BJKw,14069
+autogluon.tabular-1.2.1b20250409.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.tabular-1.2.1b20250409.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+autogluon.tabular-1.2.1b20250409.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.tabular-1.2.1b20250409.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.tabular-1.2.1b20250409.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.tabular-1.2.1b20250409.dist-info/RECORD,,
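Each RECORD entry has the form path,sha256=<digest>,<size>, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with trailing padding stripped, per the wheel spec. A small sketch for recomputing an entry's hash (the example path at the bottom is illustrative):

import base64
import hashlib

def record_hash(path: str) -> str:
    # Wheel RECORD files encode the sha256 digest as unpadded urlsafe base64.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# e.g. record_hash("autogluon/tabular/version.py") should return
# "sha256=A9BqrXhLrOJhH5qPbhMof-jLGxRS5nRGGDYPGWeziJE" for the 20250409 wheel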
autogluon/tabular/models/tab_transformer/__init__.py
DELETED
@@ -1 +0,0 @@
-
autogluon/tabular/models/tab_transformer/hyperparameters/__init__.py
DELETED
@@ -1 +0,0 @@
-
autogluon/tabular/models/tab_transformer/hyperparameters/parameters.py
DELETED
@@ -1,66 +0,0 @@
-def get_fixed_params():
-    """Parameters that currently cannot be searched during HPO"""
-    fixed_params = {
-        "batch_size": 512,  # The size of example chunks to predict on.
-        "n_cont_embeddings": 0,  # How many continuous feature embeddings to use.
-        "norm_class_name": "LayerNorm",  # What kind of normalization to use on continuous features.
-        "column_embedding": True,  # If True, 1/(n_shared_embs)th of every embedding will be reserved for a learned parameter that's common to all embeddings.
-        #'shared_embedding': False,
-        #'n_shared_embs': 8,
-        "orig_emb_resid": False,  # If True, concatenate the original embeddings on top of the feature embeddings in the Transformer layers.
-        "one_hot_embeddings": False,  # If True, one-hot encode variables whose cardinality is < max_emb_dim.
-        "drop_whole_embeddings": False,  # If True, dropout pretends the embedding was a missing value. If false, dropout sets embed features to 0
-        "max_emb_dim": 8,  # Maximum allowable amount of embeddings.
-        "base_exp_decay": 0.95,  # Rate of exponential decay for learning rate, used during finetuning.
-        "encoders": {
-            "CATEGORICAL": "CategoricalOrdinalEnc",  # How to "encode"(vectorize) each column type.
-            "DATETIME": "DatetimeOrdinalEnc",
-            "LATLONG": "LatLongQuantileOrdinalEnc",
-            "SCALAR": "ScalarQuantileOrdinalEnc",
-            "TEXT": "TextSummaryScalarEnc",
-        },
-        "aug_mask_prob": 0.4,  # What percentage of values to apply augmentation to.
-        "num_augs": 0,  # Number of augmentations to add.
-        "pretext": "BERTPretext",  # What pretext to use when performing pretraining/semi-supervised learning.
-        "n_cont_features": 8,  # How many continuous features to concatenate onto the categorical features
-        "fix_attention": False,  # If True, use the categorical embeddings in the transformer architecture.
-        "epochs": 200,  # How many epochs to train on with labeled data.
-        "pretrain_epochs": 200,  # How many epochs to pretrain on with unlabeled data.
-        "epochs_wo_improve": 30,  # How many epochs to continue running without improving on metric. aka "Early Stopping Patience"
-        "num_workers": 16,  # How many workers to use for torch DataLoader.
-        "max_columns": 500,  # Maximum number of columns TabTransformer will accept as input. This is to combat huge memory requirements/errors.
-        "tab_readout": "none",  # What sort of readout from the transformer. Options: ['readout_emb', 'mean', 'concat_pool', 'concat_pool_all', 'concat_pool_add', 'all_feat_embs', 'mean_feat_embs', 'none']
-    }
-
-    return fixed_params
-
-
-def get_hyper_params():
-    """Parameters that currently can be tuned during HPO"""
-    hyper_params = {
-        "lr": 3.6e-3,  # Learning rate
-        # Options: Real(5e-5, 5e-3)
-        "weight_decay": 1e-6,  # Rate of linear weight decay for learning rate
-        # Options: Real(1e-6, 5e-2)
-        "p_dropout": 0,  # dropout probability, 0 turns off Dropout.
-        # Options: Categorical(0, 0.1, 0.2, 0.3, 0.4, 0.5)
-        "n_heads": 4,  # Number of attention heads
-        # Options: Categorical(2, 4, 8)
-        "hidden_dim": 128,  # hidden dimension size
-        # Options: Categorical(32, 64, 128, 256)
-        "n_layers": 2,  # Number of Tab Transformer encoder layers,
-        # Options: Categorical(1, 2, 3, 4, 5)
-        "feature_dim": 64,  # Size of fully connected layer in TabNet.
-        # Options: Int(8, 128)
-        "num_output_layers": 1,  # How many fully-connected layers on top of transformer to produce predictions. Minimum 1 layer.
-        # Options: Categorical(1, 2, 3)
-    }
-
-    return hyper_params
-
-
-def get_default_param():
-    params = get_fixed_params()
-    params.update(get_hyper_params())
-
-    return params
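For reference, the deleted get_default_param() layered the HPO-tunable values over the fixed ones via dict.update, so a key appearing in both dicts would take its tunable value. The same pattern in isolation, with illustrative keys:

fixed = {"batch_size": 512, "epochs": 200}
tunable = {"lr": 3.6e-3, "n_heads": 4}

params = dict(fixed)    # copy, so the fixed defaults stay untouched
params.update(tunable)  # tunable keys win on any collision
print(params)           # {'batch_size': 512, 'epochs': 200, 'lr': 0.0036, 'n_heads': 4}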
autogluon/tabular/models/tab_transformer/hyperparameters/searchspaces.py
DELETED
@@ -1,17 +0,0 @@
-from autogluon.common import space
-
-
-# TODO: May have to split search space's by problem type. Not necessary right now.
-def get_default_searchspace():
-    params = {
-        "lr": space.Real(5e-5, 5e-3, default=1e-3, log=True),
-        "weight_decay": space.Real(1e-6, 5e-2, default=1e-6, log=True),
-        "p_dropout": space.Categorical(0.1, 0, 0.5),
-        "n_heads": space.Categorical(8, 4),
-        "hidden_dim": space.Categorical(128, 32, 64, 256),
-        "n_layers": space.Categorical(2, 1, 3, 4, 5),
-        "feature_dim": space.Int(8, 128, default=64),
-        "num_output_layers": space.Categorical(1, 2),
-    }
-
-    return params.copy()
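These spaces come from autogluon.common.space, where Real and Int take an explicit default and Categorical treats its first argument as the default (which is why the deleted file lists 128 first for hidden_dim). A small sketch, assuming the .default accessor behaves as in current autogluon.common:

from autogluon.common import space

lr = space.Real(5e-5, 5e-3, default=1e-3, log=True)
hidden_dim = space.Categorical(128, 32, 64, 256)

print(lr.default)          # 0.001: taken from the explicit default argument
print(hidden_dim.default)  # 128: Categorical defaults to its first value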