autogluon.tabular 1.2.1b20250225__py3-none-any.whl → 1.2.1b20250227__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autogluon/tabular/learner/abstract_learner.py +12 -12
- autogluon/tabular/learner/default_learner.py +12 -10
- autogluon/tabular/models/automm/automm_model.py +2 -0
- autogluon/tabular/models/automm/ft_transformer.py +3 -0
- autogluon/tabular/models/catboost/catboost_model.py +7 -0
- autogluon/tabular/models/fastainn/tabular_nn_fastai.py +10 -1
- autogluon/tabular/models/fasttext/fasttext_model.py +3 -0
- autogluon/tabular/models/image_prediction/image_predictor.py +2 -0
- autogluon/tabular/models/imodels/imodels_models.py +15 -0
- autogluon/tabular/models/knn/knn_model.py +3 -0
- autogluon/tabular/models/lgb/lgb_model.py +7 -0
- autogluon/tabular/models/lr/lr_model.py +3 -0
- autogluon/tabular/models/rf/rf_model.py +3 -0
- autogluon/tabular/models/tab_transformer/tab_transformer_model.py +2 -0
- autogluon/tabular/models/tabpfn/tabpfn_model.py +3 -0
- autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py +4 -0
- autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py +3 -0
- autogluon/tabular/models/text_prediction/text_prediction_v1_model.py +3 -0
- autogluon/tabular/models/vowpalwabbit/vowpalwabbit_model.py +3 -0
- autogluon/tabular/models/xgboost/xgboost_model.py +3 -0
- autogluon/tabular/models/xt/xt_model.py +3 -0
- autogluon/tabular/predictor/predictor.py +30 -3
- autogluon/tabular/register/__init__.py +2 -0
- autogluon/tabular/register/_ag_model_register.py +66 -0
- autogluon/tabular/register/_model_register.py +146 -0
- autogluon/tabular/trainer/abstract_trainer.py +12 -0
- autogluon/tabular/trainer/model_presets/presets.py +10 -116
- autogluon/tabular/version.py +1 -1
- {autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/METADATA +12 -12
- {autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/RECORD +37 -34
- /autogluon.tabular-1.2.1b20250225-py3.9-nspkg.pth → /autogluon.tabular-1.2.1b20250227-py3.9-nspkg.pth +0 -0
- {autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/LICENSE +0 -0
- {autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/NOTICE +0 -0
- {autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/WHEEL +0 -0
- {autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/namespace_packages.txt +0 -0
- {autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/top_level.txt +0 -0
- {autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/zip-safe +0 -0
autogluon/tabular/learner/abstract_learner.py
CHANGED
@@ -39,19 +39,19 @@ class AbstractTabularLearner(AbstractLearner):
         self,
         path_context: str,
         label: str,
-        feature_generator: PipelineFeatureGenerator,
+        feature_generator: PipelineFeatureGenerator | None = None,
         ignored_columns: list = None,
-        label_count_threshold=10,
-        problem_type=None,
-        quantile_levels=None,
-        eval_metric=None,
-        positive_class=None,
-        cache_data=True,
-        is_trainer_present=False,
-        random_state=0,
-        sample_weight=None,
-        weight_evaluation=False,
-        groups=None,
+        label_count_threshold: int = 10,
+        problem_type: str | None = None,
+        quantile_levels: list[float] | None = None,
+        eval_metric: Scorer | None = None,
+        positive_class: str | None = None,
+        cache_data: bool = True,
+        is_trainer_present: bool = False,
+        random_state: int = 0,
+        sample_weight: str | None = None,
+        weight_evaluation: bool = False,
+        groups: str | None = None,
     ):
         super().__init__(path_context=path_context, random_state=random_state)
         self.label = label
autogluon/tabular/learner/default_learner.py
CHANGED
@@ -43,16 +43,17 @@ class DefaultLearner(AbstractTabularLearner):
     def _fit(
         self,
         X: DataFrame,
-        X_val: DataFrame = None,
-        X_test: DataFrame = None,
-        X_unlabeled: DataFrame = None,
-        holdout_frac=0.1,
-        num_bag_folds=0,
-        num_bag_sets=1,
-        time_limit=None,
-        infer_limit=None,
-        infer_limit_batch_size=None,
-        verbosity=2,
+        X_val: DataFrame | None = None,
+        X_test: DataFrame | None = None,
+        X_unlabeled: DataFrame | None = None,
+        holdout_frac: float = 0.1,
+        num_bag_folds: int = 0,
+        num_bag_sets: int = 1,
+        time_limit: float | None = None,
+        infer_limit: float | None = None,
+        infer_limit_batch_size: int | None = None,
+        verbosity: int = 2,
+        raise_on_model_failure: bool = False,
         **trainer_fit_kwargs,
     ):
         """Arguments:
@@ -121,6 +122,7 @@ class DefaultLearner(AbstractTabularLearner):
             save_data=self.cache_data,
             random_state=self.random_state,
             verbosity=verbosity,
+            raise_on_model_failure=raise_on_model_failure,
         )

         self.trainer_path = trainer.path
autogluon/tabular/models/automm/ft_transformer.py
CHANGED
@@ -12,6 +12,9 @@ logger = logging.getLogger(__name__)

 # TODO: Add unit tests
 class FTTransformerModel(MultiModalPredictorModel):
+    ag_key = "FT_TRANSFORMER"
+    ag_name = "FTTransformer"
+
     def __init__(self, **kwargs):
         """Wrapper of autogluon.multimodal.MultiModalPredictor.

autogluon/tabular/models/catboost/catboost_model.py
CHANGED
@@ -2,6 +2,7 @@ import logging
 import math
 import os
 import time
+from types import MappingProxyType

 import numpy as np
 import pandas as pd
@@ -30,6 +31,12 @@ class CatBoostModel(AbstractModel):

     Hyperparameter options: https://catboost.ai/en/docs/references/training-parameters
     """
+    ag_key = "CAT"
+    ag_name = "CatBoost"
+    ag_priority = 70
+    ag_priority_by_problem_type = MappingProxyType({
+        SOFTCLASS: 60
+    })

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
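The `ag_priority_by_problem_type` mapping added here (and to the LightGBM and FastAI models below) overrides the class-level `ag_priority` for specific problem types. A minimal sketch of how such an override is expected to resolve, under stated assumptions: `resolve_priority` is a hypothetical stand-in for the `get_ag_priority` helper referenced later in `_model_register.py`, and the string "softclass" stands in for the `SOFTCLASS` constant from `autogluon.core.constants`; this is not AutoGluon's actual implementation.

```python
from __future__ import annotations

from types import MappingProxyType

# Class-level defaults mirroring the CatBoost attributes added above.
ag_priority = 70
ag_priority_by_problem_type = MappingProxyType({"softclass": 60})


def resolve_priority(problem_type: str | None = None) -> int:
    # Hypothetical helper: use the per-problem-type override when present,
    # otherwise fall back to the class-level default priority.
    return ag_priority_by_problem_type.get(problem_type, ag_priority)


print(resolve_priority())             # 70
print(resolve_priority("softclass"))  # 60
```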
autogluon/tabular/models/fastainn/tabular_nn_fastai.py
CHANGED
@@ -8,6 +8,7 @@ import warnings
 from builtins import classmethod
 from functools import partial
 from pathlib import Path
+from types import MappingProxyType
 from typing import Union

 import numpy as np
@@ -28,7 +29,7 @@ from autogluon.common.features.types import (
 from autogluon.common.utils.pandas_utils import get_approximate_df_mem_usage
 from autogluon.common.utils.resource_utils import ResourceManager
 from autogluon.common.utils.try_import import try_import_fastai
-from autogluon.core.constants import BINARY, QUANTILE, REGRESSION
+from autogluon.core.constants import BINARY, MULTICLASS, QUANTILE, REGRESSION
 from autogluon.core.hpo.constants import RAY_BACKEND
 from autogluon.core.models import AbstractModel
 from autogluon.core.utils.exceptions import TimeLimitExceeded
@@ -92,6 +93,14 @@ class NNFastAiTabularModel(AbstractModel):
         'early.stopping.min_delta': 0.0001,
         'early.stopping.patience': 10,
     """
+    ag_key = "FASTAI"
+    ag_name = "NeuralNetFastAI"
+    ag_priority = 50
+    # Increase priority for multiclass since neural networks
+    # scale better than trees as a function of n_classes.
+    ag_priority_by_problem_type = MappingProxyType({
+        MULTICLASS: 95,
+    })

     model_internals_file_name = "model-internals.pkl"

autogluon/tabular/models/image_prediction/image_predictor.py
CHANGED
@@ -22,6 +22,8 @@ class ImagePredictorModel(MultiModalPredictorModel):
     Additionally has special null image handling to improve performance in the presence of null images (aka image path of '')
     Note: null handling has not been compared to the built-in null handling of MultimodalPredictor yet.
     """
+    ag_key = "AG_IMAGE_NN"
+    ag_name = "ImagePredictor"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
autogluon/tabular/models/imodels/imodels_models.py
CHANGED
@@ -75,6 +75,9 @@ class _IModelsModel(AbstractModel):


 class RuleFitModel(_IModelsModel):
+    ag_key = "IM_RULEFIT"
+    ag_name = "RuleFit"
+
     def get_model(self):
         try_import_imodels()
         from imodels import RuleFitClassifier, RuleFitRegressor
@@ -86,6 +89,9 @@ class RuleFitModel(_IModelsModel):


 class GreedyTreeModel(_IModelsModel):
+    ag_key = "IM_GREEDYTREE"
+    ag_name = "GreedyTree"
+
     def get_model(self):
         try_import_imodels()
         from imodels import GreedyTreeClassifier
@@ -98,6 +104,9 @@ class GreedyTreeModel(_IModelsModel):


 class BoostedRulesModel(_IModelsModel):
+    ag_key = "IM_BOOSTEDRULES"
+    ag_name = "BoostedRules"
+
     def get_model(self):
         try_import_imodels()
         from imodels import BoostedRulesClassifier
@@ -109,6 +118,9 @@ class BoostedRulesModel(_IModelsModel):


 class HSTreeModel(_IModelsModel):
+    ag_key = "IM_HSTREE"
+    ag_name = "HierarchicalShrinkageTree"
+
     def get_model(self):
         try_import_imodels()
         from imodels import HSTreeClassifierCV, HSTreeRegressorCV
@@ -120,6 +132,9 @@ class HSTreeModel(_IModelsModel):


 class FigsModel(_IModelsModel):
+    ag_key = "IM_FIGS"
+    ag_name = "Figs"
+
     def get_model(self):
         try_import_imodels()
         from imodels import FIGSClassifier, FIGSRegressor
autogluon/tabular/models/knn/knn_model.py
CHANGED
@@ -22,6 +22,9 @@ class KNNModel(AbstractModel):
     """
     KNearestNeighbors model (scikit-learn): https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
     """
+    ag_key = "KNN"
+    ag_name = "KNeighbors"
+    ag_priority = 100

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
autogluon/tabular/models/lgb/lgb_model.py
CHANGED
@@ -7,6 +7,7 @@ import random
 import re
 import time
 import warnings
+from types import MappingProxyType

 import numpy as np
 import pandas as pd
@@ -40,6 +41,12 @@ class LGBModel(AbstractModel):
     Extra hyperparameter options:
         ag.early_stop : int, specifies the early stopping rounds. Defaults to an adaptive strategy. Recommended to keep default.
     """
+    ag_key = "GBM"
+    ag_name = "LightGBM"
+    ag_priority = 90
+    ag_priority_by_problem_type = MappingProxyType({
+        SOFTCLASS: 100
+    })

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
autogluon/tabular/models/lr/lr_model.py
CHANGED
@@ -38,6 +38,9 @@ class LinearModel(AbstractModel):

         'regression': https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge
     """
+    ag_key = "LR"
+    ag_name = "LinearModel"
+    ag_priority = 30

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
autogluon/tabular/models/rf/rf_model.py
CHANGED
@@ -27,6 +27,9 @@ class RFModel(AbstractModel):
     """
     Random Forest model (scikit-learn): https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
     """
+    ag_key = "RF"
+    ag_name = "RandomForest"
+    ag_priority = 80

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
autogluon/tabular/models/tab_transformer/tab_transformer_model.py
CHANGED
@@ -40,6 +40,8 @@ class TabTransformerModel(AbstractNeuralNetworkModel):
     and applies them to the use case of tabular data. Specifically, this makes TabTransformer suitable for unsupervised
     training of Tabular data with a subsequent fine-tuning step on labeled data.
     """
+    ag_key = "TRANSF"
+    ag_name = "Transformer"

     params_file_name = "tab_trans_params.pth"

autogluon/tabular/models/tabpfn/tabpfn_model.py
CHANGED
@@ -22,6 +22,9 @@ class TabPFNModel(AbstractModel):
     To use this model, `tabpfn` must be installed.
     To install TabPFN, you can run `pip install autogluon.tabular[tabpfn]` or `pip install tabpfn`.
     """
+    ag_key = "TABPFN"
+    ag_name = "TabPFN"
+    ag_priority = 110

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py
CHANGED
@@ -35,6 +35,10 @@ class TabPFNMixModel(AbstractModel):

     For more information, refer to the `./_internals/README.md` file.
     """
+    ag_key = "TABPFNMIX"
+    ag_name = "TabPFNMix"
+    ag_priority = 45
+
     weights_file_name = "model.pt"

     def __init__(self, **kwargs):
autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py
CHANGED
@@ -47,6 +47,9 @@ class TabularNeuralNetTorchModel(AbstractNeuralNetworkModel):
         ag.early_stop : int | str, default = "default"
             Specifies the early stopping rounds. Defaults to an adaptive strategy. Recommended to keep default.
     """
+    ag_key = "NN_TORCH"
+    ag_name = "NeuralNetTorch"
+    ag_priority = 25

     # Constants used throughout this class:
     unique_category_str = np.nan  # string used to represent missing values and unknown categories for categorical features.
autogluon/tabular/models/text_prediction/text_prediction_v1_model.py
CHANGED
@@ -19,6 +19,9 @@ logger = logging.getLogger(__name__)
 class TextPredictorModel(MultiModalPredictorModel):
     """MultimodalPredictor that doesn't use image features"""

+    ag_key = "AG_TEXT_NN"
+    ag_name = "TextPredictor"
+
     def _get_default_auxiliary_params(self) -> dict:
         default_auxiliary_params = super()._get_default_auxiliary_params()
         extra_auxiliary_params = dict(
autogluon/tabular/models/vowpalwabbit/vowpalwabbit_model.py
CHANGED
@@ -38,6 +38,9 @@ class VowpalWabbitModel(AbstractModel):
     VowpalWabbit Command Line args: https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Command-line-arguments

     """
+    ag_key = "VW"
+    ag_name = "VowpalWabbit"
+    ag_priority = 10

     model_internals_file_name = "model-internals.pkl"

autogluon/tabular/models/xt/xt_model.py
CHANGED
@@ -7,6 +7,9 @@ class XTModel(RFModel):
     """
     Extra Trees model (scikit-learn): https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html#sklearn.ensemble.ExtraTreesClassifier
     """
+    ag_key = "XT"
+    ag_name = "ExtraTrees"
+    ag_priority = 60

     def _get_model_type(self):
         if self.problem_type == REGRESSION:
autogluon/tabular/predictor/predictor.py
CHANGED
@@ -990,6 +990,14 @@ class TabularPredictor:
             to any amount of labeled data.
         verbosity : int
             If specified, overrides the existing `predictor.verbosity` value.
+        raise_on_model_failure: bool, default = False
+            If True, will raise on any exception during model training.
+            This is useful when using a debugger during development to identify the cause of model failures.
+            This should only be used for debugging.
+            If False, will try to skip to the next model if an exception occurred during model training.
+            This is the default logic and is a core principle of AutoGluon's design.
+
+            .. versionadded:: 1.3.0
         raise_on_no_models_fitted: bool, default = True
             If True, will raise a RuntimeError if no models were successfully fit during `fit()`.
         calibrate: bool or str, default = 'auto'
@@ -1109,6 +1117,7 @@ class TabularPredictor:
         delay_bag_sets: bool = kwargs["delay_bag_sets"]
         test_data = kwargs["test_data"]
         learning_curves = kwargs["learning_curves"]
+        raise_on_model_failure = kwargs["raise_on_model_failure"]

         if ag_args is None:
             ag_args = {}
@@ -1256,6 +1265,7 @@ class TabularPredictor:
             verbosity=verbosity,
             use_bag_holdout=use_bag_holdout,
             callbacks=callbacks,
+            raise_on_model_failure=raise_on_model_failure,
         )
         ag_post_fit_kwargs = dict(
             keep_only_best=kwargs["keep_only_best"],
@@ -4330,7 +4340,14 @@ class TabularPredictor:
             reduce_children=reduce_children,
         )

-    def delete_models(
+    def delete_models(
+        self,
+        models_to_keep: str | list[str] | None = None,
+        models_to_delete: str | list[str] | None = None,
+        allow_delete_cascade: bool = False,
+        delete_from_disk: bool = True,
+        dry_run: bool | None = None,
+    ):
         """
         Deletes models from `predictor`.
         This can be helpful to minimize memory usage and disk usage, particularly for model deployment.
@@ -4341,13 +4358,13 @@ class TabularPredictor:

         Parameters
         ----------
-        models_to_keep : str or list, default = None
+        models_to_keep : str or list[str], default = None
            Name of model or models to not delete.
            All models that are not specified and are also not required as a dependency of any model in `models_to_keep` will be deleted.
            Specify `models_to_keep='best'` to keep only the best model and its model dependencies.
            `models_to_delete` must be None if `models_to_keep` is set.
            To see the list of possible model names, use: `predictor.model_names()` or `predictor.leaderboard()`.
-        models_to_delete : str or list, default = None
+        models_to_delete : str or list[str], default = None
            Name of model or models to delete.
            All models that are not specified but depend on a model in `models_to_delete` will also be deleted.
            `models_to_keep` must be None if `models_to_delete` is set.
@@ -4361,10 +4378,19 @@ class TabularPredictor:
            WARNING: This deletes the entire directory for the deleted models, and ALL FILES located there.
            It is highly recommended to first run with `dry_run=True` to understand which directories will be deleted.
        dry_run : bool, default = True
+           WARNING: Starting in v1.4.0 dry_run will default to False.
            If `True`, then deletions don't occur, and logging statements are printed describing what would have occurred.
            Set `dry_run=False` to perform the deletions.

        """
+        if dry_run is None:
+            warnings.warn(
+                f"dry_run was not specified for `TabularPredictor.delete_models`. dry_run prior to version 1.4.0 defaults to True. "
+                f"Starting in version 1.4, AutoGluon will default dry_run to False. "
+                f"If you want to maintain the current logic in future versions, explicitly specify `dry_run=True`.",
+                category=FutureWarning,
+            )
+            dry_run = True
        self._assert_is_fit("delete_models")
        if models_to_keep == "best":
            models_to_keep = self.model_best
@@ -5020,6 +5046,7 @@ class TabularPredictor:
            # learning curves and test data (for logging purposes only)
            learning_curves=False,
            test_data=None,
+           raise_on_model_failure=False,
        )
        kwargs, ds_valid_keys = self._sanitize_dynamic_stacking_kwargs(kwargs)
        kwargs = self._validate_fit_extra_kwargs(kwargs, extra_valid_keys=list(fit_kwargs_default.keys()) + ds_valid_keys)
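Taken together, the predictor changes above add a `raise_on_model_failure` fit argument and start deprecating the implicit `dry_run=True` default of `delete_models`. A hedged usage sketch follows; the CSV path, dataset, and label column are placeholders for illustration, not anything shipped in this release.

```python
import pandas as pd
from autogluon.tabular import TabularPredictor

# Placeholder training data and label column.
train_data = pd.read_csv("train.csv")
predictor = TabularPredictor(label="target")

# New fit kwarg from this build: raise immediately if any model errors out,
# instead of skipping to the next model (intended as a debugging aid only).
predictor.fit(train_data, raise_on_model_failure=True)

# delete_models() now emits a FutureWarning when dry_run is left unspecified,
# because its default flips from True to False in v1.4.0; pass it explicitly.
predictor.delete_models(models_to_keep="best", dry_run=True)
```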
autogluon/tabular/register/_ag_model_register.py
ADDED
@@ -0,0 +1,66 @@
+from autogluon.core.models import (
+    DummyModel,
+    GreedyWeightedEnsembleModel,
+    SimpleWeightedEnsembleModel,
+)
+
+from . import ModelRegister
+from ..models import (
+    BoostedRulesModel,
+    CatBoostModel,
+    FastTextModel,
+    FigsModel,
+    FTTransformerModel,
+    GreedyTreeModel,
+    HSTreeModel,
+    ImagePredictorModel,
+    KNNModel,
+    LGBModel,
+    LinearModel,
+    MultiModalPredictorModel,
+    NNFastAiTabularModel,
+    RFModel,
+    RuleFitModel,
+    TabPFNMixModel,
+    TabPFNModel,
+    TabularNeuralNetTorchModel,
+    TextPredictorModel,
+    VowpalWabbitModel,
+    XGBoostModel,
+    XTModel,
+)
+from ..models.tab_transformer.tab_transformer_model import TabTransformerModel
+
+
+# When adding a new model officially to AutoGluon, the model class should be added to the bottom of this list.
+REGISTERED_MODEL_CLS_LST = [
+    RFModel,
+    XTModel,
+    KNNModel,
+    LGBModel,
+    CatBoostModel,
+    XGBoostModel,
+    TabularNeuralNetTorchModel,
+    LinearModel,
+    NNFastAiTabularModel,
+    TabTransformerModel,
+    TextPredictorModel,
+    ImagePredictorModel,
+    MultiModalPredictorModel,
+    FTTransformerModel,
+    TabPFNModel,
+    TabPFNMixModel,
+    FastTextModel,
+    VowpalWabbitModel,
+    GreedyWeightedEnsembleModel,
+    SimpleWeightedEnsembleModel,
+    RuleFitModel,
+    GreedyTreeModel,
+    FigsModel,
+    HSTreeModel,
+    BoostedRulesModel,
+    DummyModel,
+]
+
+# TODO: Replace logic in `autogluon.tabular.trainer.model_presets.presets` with `ag_model_register`
+ag_model_register = ModelRegister(model_cls_list=REGISTERED_MODEL_CLS_LST)
autogluon/tabular/register/_model_register.py
ADDED
@@ -0,0 +1,146 @@
+from __future__ import annotations
+
+from typing import Type
+
+import pandas as pd
+
+from autogluon.core.models import AbstractModel
+
+
+# TODO: Move to core? Maybe TimeSeries can reuse?
+# TODO: Use this / refer to this in the custom model tutorial
+# TODO: Add to documentation website
+# TODO: Test register logic in AG
+class ModelRegister:
+    """
+    ModelRegister keeps track of all known model classes to AutoGluon.
+    It can provide information such as:
+        What model classes and keys are valid to specify in an AutoGluon predictor fit call.
+        What a model's name is.
+        What a model's key is (such as the key specified by the user in `hyperparameters` to refer to a specific model type).
+        What a model's priority is (aka which order to fit a list of models).
+
+    Additionally, users can register custom models to AutoGluon so the key is recognized in `hyperparameters` and is treated with the proper priority and name.
+    They can register new models via `ModelRegister.add(model_cls)`.
+
+    Therefore, if a user creates a custom model `MyCustomModel` that inherits from `AbstractModel`, they can set the class attributes in `MyCustomModel`:
+        ag_key: The string key that can be specified in `hyperparameters`. Example: "GBM" for LGBModel
+        ag_name: The string name that is used in logging and accessing the model. Example: "LightGBM" for LGBModel
+        ag_priority: The int priority that is used to order the fitting of models. Higher values will be fit before lower values. Default 0. Example: 90 for LGBModel
+        ag_priority_to_problem_type: A dictionary of problem_type to priority that overrides `ag_priority` if specified for a given problem_type. Optional.
+
+    Then they can say `ag_model_register.add(MyCustomModel)`.
+    Assuming MyCustomModel.ag_key = "MY_MODEL", they can now do:
+    ```
+    predictor.fit(..., hyperparameters={"MY_MODEL": ...})
+    ```
+    """
+    def __init__(self, model_cls_list: list[Type[AbstractModel]] | None = None):
+        if model_cls_list is None:
+            model_cls_list = []
+        assert isinstance(model_cls_list, list)
+        self._model_cls_list = []
+        self._key_to_cls_map = dict()
+        for model_cls in model_cls_list:
+            self.add(model_cls)
+
+    def exists(self, model_cls: Type[AbstractModel]) -> bool:
+        return model_cls in self._model_cls_list
+
+    def add(self, model_cls: Type[AbstractModel]):
+        """
+        Adds `model_cls` to the model register
+        """
+        assert not self.exists(model_cls), f"Cannot add model_cls that is already registered: {model_cls}"
+        if model_cls.ag_key is None:
+            raise AssertionError(
+                f"Cannot add model_cls with `ag_key=None`. "
+                f"Ensure you set class attribute `ag_key` to a string for your model_cls: {model_cls}"
+                f'\n\tFor example, LightGBModel sets `ag_key = "GBM"`'
+            )
+        if model_cls.ag_name is None:
+            raise AssertionError(
+                f"Cannot add model_cls with `ag_name=None`. "
+                f"Ensure you set class attribute `ag_name` to a string for your model_cls: {model_cls}"
+                f'\n\tFor example, LightGBModel sets `ag_name = "LightGBM"`'
+            )
+        assert isinstance(model_cls.ag_key, str)
+        assert isinstance(model_cls.ag_name, str)
+        assert isinstance(model_cls.ag_priority, int)
+        if model_cls.ag_key in self._key_to_cls_map:
+            raise AssertionError(
+                f"Cannot register a model class that shares a model key with an already registered model class."
+                f"\n`model_cls.ag_key` must be unique among registered models:"
+                f"\n\t          New Class: {model_cls}"
+                f"\n\tConflicting Class: {self._key_to_cls_map[model_cls.ag_key]}"
+                f"\n\tConflicting ag_key: {model_cls.ag_key}"
+            )
+        self._model_cls_list.append(model_cls)
+        self._key_to_cls_map[model_cls.ag_key] = model_cls
+
+    def remove(self, model_cls: Type[AbstractModel]):
+        """
+        Removes `model_cls` from the model register
+        """
+        assert self.exists(model_cls), f"Cannot remove model_cls that isn't registered: {model_cls}"
+        self._model_cls_list = [m for m in self._model_cls_list if m != model_cls]
+        self._key_to_cls_map.pop(model_cls.ag_key)
+
+    @property
+    def model_cls_list(self) -> list[Type[AbstractModel]]:
+        return self._model_cls_list
+
+    @property
+    def keys(self) -> list[str]:
+        return [self.key(model_cls) for model_cls in self.model_cls_list]
+
+    def key_to_cls_map(self) -> dict[str, Type[AbstractModel]]:
+        return self._key_to_cls_map
+
+    def key_to_cls(self, key: str) -> Type[AbstractModel]:
+        if key not in self._key_to_cls_map:
+            raise ValueError(
+                f"No registered model exists with provided key: {key}"
+                f"\n\tValid keys: {list(self.key_to_cls_map().keys())}"
+            )
+        return self.key_to_cls_map()[key]
+
+    def priority_map(self, problem_type: str | None = None) -> dict[Type[AbstractModel], int]:
+        return {model_cls: self.priority(model_cls, problem_type=problem_type) for model_cls in self._model_cls_list}
+
+    def key(self, model_cls: Type[AbstractModel]) -> str:
+        assert self.exists(model_cls), f"Model class must be registered: {model_cls}"
+        return model_cls.ag_key
+
+    def name_map(self) -> dict[Type[AbstractModel], str]:
+        return {model_cls: model_cls.ag_name for model_cls in self._model_cls_list}
+
+    def name(self, model_cls: Type[AbstractModel]) -> str:
+        assert self.exists(model_cls), f"Model class must be registered: {model_cls}"
+        return model_cls.ag_name
+
+    def priority(self, model_cls: Type[AbstractModel], problem_type: str | None = None) -> int:
+        assert self.exists(model_cls), f"Model class must be registered: {model_cls}"
+        return model_cls.get_ag_priority(problem_type=problem_type)
+
+    def docstring(self, model_cls: Type[AbstractModel]) -> str:
+        assert self.exists(model_cls), f"Model class must be registered: {model_cls}"
+        return model_cls.__doc__
+
+    # TODO: Could add a lot of information here to track which features are supported for each model:
+    #  ag.early_stop support
+    #  refit_full support
+    #  GPU support
+    #  etc.
+    def to_frame(self) -> pd.DataFrame:
+        model_classes = self.model_cls_list
+        cls_dict = {}
+        for model_cls in model_classes:
+            cls_dict[self.key(model_cls)] = {
+                "model_cls": model_cls.__name__,
+                "ag_name": self.name(model_cls),
+                "ag_priority": self.priority(model_cls),
+            }
+        df = pd.DataFrame(cls_dict).T
+        df.index.name = "ag_key"
+        return df
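The docstring above describes how a user-defined model can opt into the register. Below is a minimal sketch of that flow, assuming the `ag_model_register` instance is importable from `autogluon.tabular.register` (as the updated `presets.py` import suggests); `MyCustomModel` and its attribute values are hypothetical.

```python
from autogluon.core.models import AbstractModel
from autogluon.tabular.register import ag_model_register


class MyCustomModel(AbstractModel):
    # Hypothetical example values; the attribute names come from the diff above.
    ag_key = "MY_MODEL"        # key accepted in `hyperparameters`
    ag_name = "MyCustomModel"  # display name used in logging
    ag_priority = 0            # fit ordering; higher priority fits earlier


# Register the class so its key, name, and priority are known to AutoGluon.
ag_model_register.add(MyCustomModel)

# The register can then map the key back to the class:
assert ag_model_register.key_to_cls("MY_MODEL") is MyCustomModel
```

Per the docstring, once registered the key can be requested in a fit call, e.g. `predictor.fit(..., hyperparameters={"MY_MODEL": {}})`.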
autogluon/tabular/trainer/abstract_trainer.py
CHANGED
@@ -128,6 +128,12 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
        Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
        If using logging, you can alternatively control amount of information printed via `logger.setLevel(L)`,
        where `L` ranges from 0 to 50 (Note: higher values of `L` correspond to fewer print statements, opposite of verbosity levels).
+    raise_on_model_failure : bool, default = False
+        If True, Trainer will raise on any exception during model training.
+        This is ideal when using a debugger during development.
+        If False, Trainer will try to skip to the next model if an exception occurred during model training.
+
+        .. versionadded:: 1.3.0
    """

    distill_stackname = "distill"  # name of stack-level for distilled student models
@@ -149,6 +155,7 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
        save_data: bool = False,
        random_state: int = 0,
        verbosity: int = 2,
+       raise_on_model_failure: bool = False,
    ):
        super().__init__(
            path=path,
@@ -163,6 +170,7 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
        #: Integer value added to the stack level to get the random_state for kfold splits or the train/val split if bagging is disabled
        self.random_state = random_state
        self.verbosity = verbosity
+       self.raise_on_model_failure = raise_on_model_failure

        # TODO: consider redesign where Trainer doesn't need sample_weight column name and weights are separate from X
        self.sample_weight = sample_weight
@@ -2188,6 +2196,10 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
                # TODO: Add recursive=True to avoid repeatedly loading models each time this is called for bagged ensembles (especially during repeated bagging)
                self.save_model(model=model)
        except Exception as exc:
+           if self.raise_on_model_failure:
+               # immediately raise instead of skipping to next model, useful for debugging during development
+               logger.warning("Model failure occurred... Raising exception instead of continuing to next model. (raise_on_model_failure=True)")
+               raise exc
            exception = exc  # required to reference exc outside of `except` statement
            del_model = True
            if isinstance(exception, TimeLimitExceeded):
autogluon/tabular/trainer/model_presets/presets.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import copy
 import inspect
 import logging
@@ -22,70 +24,16 @@ from autogluon.core.constants import (
 )
 from autogluon.core.models import (
     AbstractModel,
-    DummyModel,
-    GreedyWeightedEnsembleModel,
-    SimpleWeightedEnsembleModel,
     StackerEnsembleModel,
 )
 from autogluon.core.trainer.utils import process_hyperparameters

-from ...models import (
-    BoostedRulesModel,
-    CatBoostModel,
-    FastTextModel,
-    FigsModel,
-    FTTransformerModel,
-    GreedyTreeModel,
-    HSTreeModel,
-    ImagePredictorModel,
-    KNNModel,
-    LGBModel,
-    LinearModel,
-    MultiModalPredictorModel,
-    NNFastAiTabularModel,
-    RFModel,
-    RuleFitModel,
-    TabPFNMixModel,
-    TabPFNModel,
-    TabularNeuralNetTorchModel,
-    TextPredictorModel,
-    VowpalWabbitModel,
-    XGBoostModel,
-    XTModel,
-)
-from ...models.tab_transformer.tab_transformer_model import TabTransformerModel
+from ...register import ag_model_register
 from ...version import __version__

 logger = logging.getLogger(__name__)

-#
-DEFAULT_MODEL_PRIORITY = dict(
-    TABPFN=110,  # highest priority due to its very fast training time
-    KNN=100,
-    GBM=90,
-    RF=80,
-    CAT=70,
-    XT=60,
-    FASTAI=50,
-    TABPFNMIX=45,
-    XGB=40,
-    LR=30,
-    NN_TORCH=25,
-    VW=10,
-    FASTTEXT=0,
-    AG_TEXT_NN=0,
-    AG_IMAGE_NN=0,
-    AG_AUTOMM=0,
-    TRANSF=0,
-    custom=0,
-    # interpretable models
-    IM_RULEFIT=0,
-    IM_GREEDYTREE=0,
-    IM_FIGS=0,
-    IM_HSTREE=0,
-    IM_BOOSTEDRULES=0,
-)
-
+# TODO: Replace with ag_model_register
 # Problem type specific model priority overrides (will update default values in DEFAULT_MODEL_PRIORITY)
 PROBLEM_TYPE_MODEL_PRIORITY = {
     MULTICLASS: dict(
@@ -93,6 +41,7 @@ PROBLEM_TYPE_MODEL_PRIORITY = {
     ),
 }

+# TODO: Replace with ag_model_register
 DEFAULT_SOFTCLASS_PRIORITY = dict(
     GBM=100,
     RF=80,
@@ -102,66 +51,11 @@ DEFAULT_SOFTCLASS_PRIORITY = dict(

 DEFAULT_CUSTOM_MODEL_PRIORITY = 0

-
-
-
-
-
-    CAT=CatBoostModel,
-    XGB=XGBoostModel,
-    NN_TORCH=TabularNeuralNetTorchModel,
-    LR=LinearModel,
-    FASTAI=NNFastAiTabularModel,
-    TRANSF=TabTransformerModel,
-    AG_TEXT_NN=TextPredictorModel,
-    AG_IMAGE_NN=ImagePredictorModel,
-    AG_AUTOMM=MultiModalPredictorModel,
-    FT_TRANSFORMER=FTTransformerModel,
-    TABPFN=TabPFNModel,
-    TABPFNMIX=TabPFNMixModel,
-    FASTTEXT=FastTextModel,
-    ENS_WEIGHTED=GreedyWeightedEnsembleModel,
-    SIMPLE_ENS_WEIGHTED=SimpleWeightedEnsembleModel,
-    # interpretable models
-    IM_RULEFIT=RuleFitModel,
-    IM_GREEDYTREE=GreedyTreeModel,
-    IM_FIGS=FigsModel,
-    IM_HSTREE=HSTreeModel,
-    IM_BOOSTEDRULES=BoostedRulesModel,
-    VW=VowpalWabbitModel,
-    DUMMY=DummyModel,
-)
-
-
-# TODO: v1.0 Have this be defined in the model class
-DEFAULT_MODEL_NAMES = {
-    RFModel: "RandomForest",
-    XTModel: "ExtraTrees",
-    KNNModel: "KNeighbors",
-    LGBModel: "LightGBM",
-    CatBoostModel: "CatBoost",
-    XGBoostModel: "XGBoost",
-    TabularNeuralNetTorchModel: "NeuralNetTorch",
-    LinearModel: "LinearModel",
-    NNFastAiTabularModel: "NeuralNetFastAI",
-    TabTransformerModel: "Transformer",
-    TextPredictorModel: "TextPredictor",
-    ImagePredictorModel: "ImagePredictor",
-    MultiModalPredictorModel: "MultiModalPredictor",
-    FTTransformerModel: "FTTransformer",
-    TabPFNModel: "TabPFN",
-    TabPFNMixModel: "TabPFNMix",
-    FastTextModel: "FastText",
-    VowpalWabbitModel: "VowpalWabbit",
-    GreedyWeightedEnsembleModel: "WeightedEnsemble",
-    SimpleWeightedEnsembleModel: "WeightedEnsemble",
-    # Interpretable models
-    RuleFitModel: "RuleFit",
-    GreedyTreeModel: "GreedyTree",
-    FigsModel: "Figs",
-    HSTreeModel: "HierarchicalShrinkageTree",
-    BoostedRulesModel: "BoostedRules",
-}
+# FIXME: Don't do this, use ag_model_register lazily so users can register custom models before calling fit
+DEFAULT_MODEL_PRIORITY = {ag_model_register.key(model_cls): ag_model_register.priority(model_cls) for model_cls in ag_model_register.model_cls_list}
+DEFAULT_MODEL_NAMES = ag_model_register.name_map()
+REGISTERED_MODEL_CLS_LST = ag_model_register.model_cls_list
+MODEL_TYPES = ag_model_register.key_to_cls_map()


 VALID_AG_ARGS_KEYS = {
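Since `presets.py` now derives its module-level mappings (`DEFAULT_MODEL_PRIORITY`, `DEFAULT_MODEL_NAMES`, `MODEL_TYPES`) from `ag_model_register`, the register is also the natural place to inspect which models are available. A small sketch, assuming the import path used by `presets.py`; the printed output shape follows `to_frame()` above and is not verbatim from AutoGluon's docs.

```python
from autogluon.tabular.register import ag_model_register

# Map a `hyperparameters` key to its model class, e.g. "GBM" -> LGBModel.
lgb_cls = ag_model_register.key_to_cls("GBM")
print(lgb_cls.ag_name, lgb_cls.ag_priority)  # LightGBM 90

# One row per registered model, indexed by ag_key, with
# model_cls / ag_name / ag_priority columns (see to_frame() above).
print(ag_model_register.to_frame())
```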
autogluon/tabular/version.py
CHANGED
{autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.tabular
-Version: 1.2.1b20250225
+Version: 1.2.1b20250227
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -41,19 +41,19 @@ Requires-Dist: scipy<1.16,>=1.5.4
 Requires-Dist: pandas<2.3.0,>=2.0.0
 Requires-Dist: scikit-learn<1.5.3,>=1.4.0
 Requires-Dist: networkx<4,>=3.0
-Requires-Dist: autogluon.core==1.2.1b20250225
-Requires-Dist: autogluon.features==1.2.1b20250225
+Requires-Dist: autogluon.core==1.2.1b20250227
+Requires-Dist: autogluon.features==1.2.1b20250227
 Provides-Extra: all
-Requires-Dist: torch<2.6,>=2.2; extra == "all"
-Requires-Dist: spacy<3.8; extra == "all"
-Requires-Dist: huggingface-hub[torch]; extra == "all"
-Requires-Dist: lightgbm<4.6,>=4.0; extra == "all"
-Requires-Dist: einops<0.9,>=0.7; extra == "all"
-Requires-Dist: autogluon.core[all]==1.2.1b20250225; extra == "all"
-Requires-Dist: catboost<1.3,>=1.2; extra == "all"
-Requires-Dist: xgboost<2.2,>=1.6; extra == "all"
 Requires-Dist: numpy<2.0.0,>=1.25; extra == "all"
 Requires-Dist: fastai<2.8,>=2.3.1; extra == "all"
+Requires-Dist: xgboost<2.2,>=1.6; extra == "all"
+Requires-Dist: catboost<1.3,>=1.2; extra == "all"
+Requires-Dist: einops<0.9,>=0.7; extra == "all"
+Requires-Dist: lightgbm<4.6,>=4.0; extra == "all"
+Requires-Dist: huggingface-hub[torch]; extra == "all"
+Requires-Dist: torch<2.6,>=2.2; extra == "all"
+Requires-Dist: spacy<3.8; extra == "all"
+Requires-Dist: autogluon.core[all]==1.2.1b20250227; extra == "all"
 Provides-Extra: catboost
 Requires-Dist: numpy<2.0.0,>=1.25; extra == "catboost"
 Requires-Dist: catboost<1.3,>=1.2; extra == "catboost"
@@ -66,7 +66,7 @@ Requires-Dist: imodels<1.4.0,>=1.3.10; extra == "imodels"
 Provides-Extra: lightgbm
 Requires-Dist: lightgbm<4.6,>=4.0; extra == "lightgbm"
 Provides-Extra: ray
-Requires-Dist: autogluon.core[all]==1.2.1b20250225; extra == "ray"
+Requires-Dist: autogluon.core[all]==1.2.1b20250227; extra == "ray"
 Provides-Extra: skex
 Requires-Dist: scikit-learn-intelex<2025.1,>=2024.0; extra == "skex"
 Provides-Extra: skl2onnx
{autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/RECORD
RENAMED
@@ -1,6 +1,6 @@
-autogluon.tabular-1.2.
+autogluon.tabular-1.2.1b20250227-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
 autogluon/tabular/__init__.py,sha256=2OXpJCvENRHubBTYNIPpHX93WWuFZzsJBtTZbNVHVas,400
-autogluon/tabular/version.py,sha256=
+autogluon/tabular/version.py,sha256=5ozpv_b_rM463xTXRdXij1xDVDTJr5oIBlgIcEiF2lw,91
 autogluon/tabular/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/configs/config_helper.py,sha256=Pb2aW9Z9w77pYKPRVZ3nBzHY3KJaiEJSJ747zZcJIVk,21132
 autogluon/tabular/configs/feature_generator_presets.py,sha256=EV5Ym8VW15q92MwOUpTi7wZFS2QooM51fLg3RdUsn-M,1223
@@ -14,18 +14,18 @@ autogluon/tabular/experimental/_tabular_classifier.py,sha256=7lGoFdvkHiZS3VpcXo9
 autogluon/tabular/experimental/_tabular_regressor.py,sha256=EzEDL-19T5QUVNmLkSHNzzGwYrUxyqlNpIDPMgtV6Gg,1932
 autogluon/tabular/experimental/plot_leaderboard.py,sha256=BN_kB-zmOZNUYWyI7z9pF67GCV20zo8yV51HKKj1SCY,9481
 autogluon/tabular/learner/__init__.py,sha256=Hhmk5WpKQHohVmI-veOaKMelKJpIdzeXrmw_DPn3DTU,63
-autogluon/tabular/learner/abstract_learner.py,sha256=
-autogluon/tabular/learner/default_learner.py,sha256=
+autogluon/tabular/learner/abstract_learner.py,sha256=HmnW7KO3sV4H1QSquJn9DYOwoWa2vohKThq_hr4OHM4,55102
+autogluon/tabular/learner/default_learner.py,sha256=hjdKbcFtIQxQ3-k1LiGOo-w5sLxIIQAyFLs3-R35aw0,24781
 autogluon/tabular/models/__init__.py,sha256=tDVqwVG9q2ctLWRouyXeYs5NiSBnOnwh3anAfZyM3jg,1099
 autogluon/tabular/models/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/_utils/rapids_utils.py,sha256=gbej9Hjn4alCWZuGN9sOLXMMAyWbgHPThTsp2feS39o,1038
 autogluon/tabular/models/_utils/torch_utils.py,sha256=dxs_KMMAOmNkRNjYf_hrzqaHIfkqn1xoKRKqCFbQ1Rk,537
 autogluon/tabular/models/automm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/automm/automm_model.py,sha256=
-autogluon/tabular/models/automm/ft_transformer.py,sha256=
+autogluon/tabular/models/automm/automm_model.py,sha256=_FPkXiHjowuc_KTKVkrzstaj1pecPDxIAZfw0LVjbJ0,11220
+autogluon/tabular/models/automm/ft_transformer.py,sha256=8T80u3SbWr2-0KagOXhYpZkTvFIbRF91rE8sVZN0w_0,3901
 autogluon/tabular/models/catboost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/catboost/callbacks.py,sha256=l8x17n_w7oEFs-iDECSdBKZ89yW5g1z-zvj4XLgQPkw,7098
-autogluon/tabular/models/catboost/catboost_model.py,sha256=
+autogluon/tabular/models/catboost/catboost_model.py,sha256=lzHsaY-EuihoSjZ7owVqAfzmdT5VVlMTQeIZ8_PJcxY,17225
 autogluon/tabular/models/catboost/catboost_softclass_utils.py,sha256=UiW0SUb3hFueW5qYtQn6Sbk7Wg7BWN4jqKWeFtbMvgU,3919
 autogluon/tabular/models/catboost/catboost_utils.py,sha256=YSc94V4DjrwbmkeUM8306zV7z21oq-K-qGCOj0UE_wg,3167
 autogluon/tabular/models/catboost/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -36,39 +36,39 @@ autogluon/tabular/models/fastainn/callbacks.py,sha256=3WvOEwqd1YAVInooKsFOTzAkCL
 autogluon/tabular/models/fastainn/fastai_helpers.py,sha256=gGYzyrAFl8hi8GnsemZNLGZn5xr7cyJXdFl08PIlza4,1393
 autogluon/tabular/models/fastainn/imports_helper.py,sha256=ICxA8ty47-oZu0Q9AjKCQe8uVi340Iu0NFruxvJPrbA,330
 autogluon/tabular/models/fastainn/quantile_helpers.py,sha256=d89GKvSRBgOy9EqcDI83MK5sqPRxP6JJ3BmPLmKnB0o,1808
-autogluon/tabular/models/fastainn/tabular_nn_fastai.py,sha256=
+autogluon/tabular/models/fastainn/tabular_nn_fastai.py,sha256=SsBtfYRFuBXrN7LaCaJqLSLd1ZLVad5SXtHa_pxdG1w,29282
 autogluon/tabular/models/fastainn/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/fastainn/hyperparameters/parameters.py,sha256=DkQwAZZ7CuODKoljr-yrkx-uFxBSPRxkKuvPdwO-UhQ,2069
 autogluon/tabular/models/fastainn/hyperparameters/searchspaces.py,sha256=5qdknZDrHtdPdrhSqjamYQrCxvupXvlN3bVGEPgs48E,1660
 autogluon/tabular/models/fasttext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/fasttext/fasttext_model.py,sha256=
+autogluon/tabular/models/fasttext/fasttext_model.py,sha256=0_Dbjz0_rzrRtBKVALlGZ8khnqeO5hPsvmZHi_ktv78,7090
 autogluon/tabular/models/fasttext/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/fasttext/hyperparameters/parameters.py,sha256=DbkLlHlxRh1uGWJ_sUYNrweSJj4yjlOBH_H2COyaWL8,1234
 autogluon/tabular/models/image_prediction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/image_prediction/image_predictor.py,sha256=
+autogluon/tabular/models/image_prediction/image_predictor.py,sha256=2KBxnFzw-wAIjKYpJkmwKfsB_laUpWbY5HOxyDL6uqQ,5454
 autogluon/tabular/models/imodels/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/imodels/imodels_models.py,sha256=
+autogluon/tabular/models/imodels/imodels_models.py,sha256=JE-VFeFc8Ot2T1IUtlNMP4_T-9F1I1NzQt4zA5aZ-to,4712
 autogluon/tabular/models/knn/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/knn/_knn_loo_variants.py,sha256=-n2znYS7OBA0bZvtei6JZiEMRWp4GX-Qp64uheaHyhQ,4562
-autogluon/tabular/models/knn/knn_model.py,sha256=
+autogluon/tabular/models/knn/knn_model.py,sha256=fN8ybCC3zb9aVXS18M41RS78u58bvEvrv3ImELY_WOg,13874
 autogluon/tabular/models/knn/knn_rapids_model.py,sha256=0FFApNZFH8nyrDqlBSUV7jO-2fLe0-h_UHp1GsyQJ8E,1550
 autogluon/tabular/models/knn/knn_utils.py,sha256=XU1cxVXp1BAoQnja2_KmSIn9_q9gZkjAya7-9b0uStk,7455
 autogluon/tabular/models/lgb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/lgb/callbacks.py,sha256=0X42-nAbftKnu_zmFPDf8S3RrUJJjsJ1Qs_TPAJxzjU,11367
-autogluon/tabular/models/lgb/lgb_model.py,sha256=
+autogluon/tabular/models/lgb/lgb_model.py,sha256=BKS9zMfj65Xnp74IK2IU_i9RfY4dWmiJxGqp7IiyWCQ,24898
 autogluon/tabular/models/lgb/lgb_utils.py,sha256=jzTDTzP-z7gcBGZyy1_0YkyTOLbU5DLeRqtil4FCZPI,7382
 autogluon/tabular/models/lgb/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/lgb/hyperparameters/parameters.py,sha256=LLEQ-Ns3HElWBsFJx3ogRV7L6qw_nXlcl7EyO0C0fVQ,1336
 autogluon/tabular/models/lgb/hyperparameters/searchspaces.py,sha256=tvNNR7niWz_B-PndYQXb6vVNABxSfBYRHj6ZVQJ1x2E,1930
 autogluon/tabular/models/lr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/lr/lr_model.py,sha256=
+autogluon/tabular/models/lr/lr_model.py,sha256=t2rZJf47bGJ5YlGrg100-LpAhNIPXE2KwaxDTyHxgSs,15433
 autogluon/tabular/models/lr/lr_preprocessing_utils.py,sha256=zkmVZtv05BQPDasVBz1J8LmXEfLgoggsv57s6cXuTMQ,1094
 autogluon/tabular/models/lr/lr_rapids_model.py,sha256=a07JvjWemrL0L08moA3K4lnYieukRlAdb2Z_uWA44k8,2127
 autogluon/tabular/models/lr/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/lr/hyperparameters/parameters.py,sha256=Hr5YC13zjbt3CfCbzGj8iXUIuDn-Q7FvDT2uSuiSVlM,1414
 autogluon/tabular/models/lr/hyperparameters/searchspaces.py,sha256=Igywc-B6qJ9EBLdasrDhW-Ot5FGirIzbXLwv5HRe5Xo,276
 autogluon/tabular/models/rf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/rf/rf_model.py,sha256=
+autogluon/tabular/models/rf/rf_model.py,sha256=U9prO0B3Uk7Ycvagau7rgm_fNkBaJS1Km40iY9XMa3I,21342
 autogluon/tabular/models/rf/rf_quantile.py,sha256=2S8FE8po9lMnZaeKuVkzOUFOcdil46ZbFqm49OuvNZY,36460
 autogluon/tabular/models/rf/rf_rapids_model.py,sha256=3s-8M11dzCl_2Lu5iB3H8YjHLgyP_SElrm_4w_HfmqY,2028
 autogluon/tabular/models/rf/compilers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -80,15 +80,15 @@ autogluon/tabular/models/tab_transformer/pretexts.py,sha256=UEoDq_8hLKbY7EbE5IyL
 autogluon/tabular/models/tab_transformer/tab_model_base.py,sha256=4rmY1IrwoFuJejy-9gOoYSz-ar3DvZY8uXyDUBKk7Iw,3615
 autogluon/tabular/models/tab_transformer/tab_transformer.py,sha256=1c1oTJfSsGxQjzZJVN8doqFmYV-Wwwbqcu7RcW77kJk,6991
 autogluon/tabular/models/tab_transformer/tab_transformer_encoder.py,sha256=v2G1S_MSESzKqtvSfxS5uEse2CWtOn_K2E-uIwuE6zI,24701
-autogluon/tabular/models/tab_transformer/tab_transformer_model.py,sha256=
+autogluon/tabular/models/tab_transformer/tab_transformer_model.py,sha256=E_qPm2AMZY3I7EdKqPD5APKDbat5S-A7ohKaXAO-29w,22752
 autogluon/tabular/models/tab_transformer/utils.py,sha256=rrNk0X6Y0vzt7ivylRmTf0zjDx1DIF-5_ibf6B9Taz8,4554
 autogluon/tabular/models/tab_transformer/hyperparameters/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 autogluon/tabular/models/tab_transformer/hyperparameters/parameters.py,sha256=-vJRG8PVj5FgQnF9FNJHMvoIzzyazGE4XLRyQKL5VT8,3854
 autogluon/tabular/models/tab_transformer/hyperparameters/searchspaces.py,sha256=poiOFwOVIf1ONcPIjOqsA31YbqBgWxy0DlVFpVwKNHM,650
 autogluon/tabular/models/tabpfn/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon/tabular/models/tabpfn/tabpfn_model.py,sha256=
+autogluon/tabular/models/tabpfn/tabpfn_model.py,sha256=xdxBq9Bg4e8Cg6Y7jfUEwWr60mM7OvSOOLN0usrUn0I,6917
 autogluon/tabular/models/tabpfnmix/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py,sha256=
+autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py,sha256=mc3mMRwytBYaxXjnnrlAQyS2dowT9sRd4xfvEkJfCW8,16765
 autogluon/tabular/models/tabpfnmix/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/tabpfnmix/_internal/tabpfnmix_classifier.py,sha256=U3DAhtSLpHUu-22GgR3QCZJnpRYpOW95XoTV7tE9J5Y,3425
 autogluon/tabular/models/tabpfnmix/_internal/tabpfnmix_regressor.py,sha256=J6JvrK6L6y3s-Ah6sHQdjSK0mwAMP-Wy3RRBwzB0AoA,3196
@@ -121,7 +121,7 @@ autogluon/tabular/models/tabular_nn/hyperparameters/__init__.py,sha256=47DEQpj8H
 autogluon/tabular/models/tabular_nn/hyperparameters/parameters.py,sha256=Z3t_U1f7jfolPey6lzqgJyoFbVgoncFNSvCKXSuLxeU,6465
 autogluon/tabular/models/tabular_nn/hyperparameters/searchspaces.py,sha256=pT9cJ3MaWPnaQwAf47Yz6f0-L9qDBknahERbggAp52U,2810
 autogluon/tabular/models/tabular_nn/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py,sha256=
+autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py,sha256=QxZZ9XILnO4VdZ3avNNaPV7K16OsINq4SAWN9W9O3o4,42400
 autogluon/tabular/models/tabular_nn/torch/tabular_torch_dataset.py,sha256=RdnQGZSrvY1iuJB4JTANniH3Dorw-DP0Em_JK3_h7RM,13497
 autogluon/tabular/models/tabular_nn/torch/torch_network_modules.py,sha256=Qc3PwXTD8A7PgXi6EGuaBCrN3jsFAXDLCW7i6tE5wYI,11338
 autogluon/tabular/models/tabular_nn/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -129,35 +129,38 @@ autogluon/tabular/models/tabular_nn/utils/categorical_encoders.py,sha256=uLQaHku
 autogluon/tabular/models/tabular_nn/utils/data_preprocessor.py,sha256=ypXqtxdt1qH6la1hcq-BJ0dzQBNtgKY-BjXmIWxPjCg,5237
 autogluon/tabular/models/tabular_nn/utils/nn_architecture_utils.py,sha256=tttzR5EtYcFa6sIrUG9wyegdYmYE5DPK_CiLF1-L3c8,2875
 autogluon/tabular/models/text_prediction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/text_prediction/text_prediction_v1_model.py,sha256=
+autogluon/tabular/models/text_prediction/text_prediction_v1_model.py,sha256=XCKKb9N3mKXLtbMHMjQ3y5HwKQFF25enA287WHv-ebc,924
 autogluon/tabular/models/vowpalwabbit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/vowpalwabbit/vowpalwabbit_model.py,sha256=
+autogluon/tabular/models/vowpalwabbit/vowpalwabbit_model.py,sha256=hToCVe0KP3PIkOl3-jEh0R1m7_9xwGXMY8RRH2bzXnM,11699
 autogluon/tabular/models/vowpalwabbit/vowpalwabbit_utils.py,sha256=jZ0STjvqwKw8jJDeoo5yAXTvgwFvY8Fsz6OqSif_JGI,3677
 autogluon/tabular/models/xgboost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/xgboost/callbacks.py,sha256=uynimXya07XQMBkDvec-7mXK6OfMGP6M8MiVYu8OVRI,7008
-autogluon/tabular/models/xgboost/xgboost_model.py,sha256=
+autogluon/tabular/models/xgboost/xgboost_model.py,sha256=4hMABSzg3CJnpWwiS3A7qhzWpNcC6BWyVAoC2MMr-TI,14322
 autogluon/tabular/models/xgboost/xgboost_utils.py,sha256=FVqZ8h4JAe_pifSvNx83cLZHwsuzTXylrrcan07AoNo,5757
 autogluon/tabular/models/xgboost/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/models/xgboost/hyperparameters/parameters.py,sha256=ay6bVVpiPzftbtz6TTS76w7j4vjDjzHFpuf2Bjf6Zu4,1673
 autogluon/tabular/models/xgboost/hyperparameters/searchspaces.py,sha256=lFwI34pcRtlVQkxmsdZsSaPry8t_WSMBhig4soMK54k,2140
 autogluon/tabular/models/xt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/models/xt/xt_model.py,sha256=
+autogluon/tabular/models/xt/xt_model.py,sha256=z9Hcp3CR_QS4XMlE9-g5rFknj2KPHfFLtyqb0qvOYbw,825
 autogluon/tabular/predictor/__init__.py,sha256=zCMgjxQlWpDWnr1l1xjBCiK3rWC3N3RoD8UXBnazT74,107
 autogluon/tabular/predictor/interpretable_predictor.py,sha256=5UeKgnMFsfY65tiO3kxfHBPr03lyswLrgdtjPhI0Y7Q,6934
-autogluon/tabular/predictor/predictor.py,sha256=
+autogluon/tabular/predictor/predictor.py,sha256=A86C_9ixByrSrcU-Bxy9eTTZ7nCnIvRDN2uY-0ANLVY,357082
+autogluon/tabular/register/__init__.py,sha256=7CLOTWIUho0wi4eAwhYJ5Y0PfvNCWKnRwlw3bwYoTNE,93
+autogluon/tabular/register/_ag_model_register.py,sha256=tMr-QgxgCE49tdThdSFOZaJg2D9ckDh6fiR5K4cRtvk,1564
+autogluon/tabular/register/_model_register.py,sha256=jqSg0d89dXAAcp-OT4II90ce994ByKMMzAYmpkyaRbI,6824
 autogluon/tabular/trainer/__init__.py,sha256=PW_PGL-tWoQzx3ES2S53bQEZOtsRWTYiM9QdOqsk0dI,38
-autogluon/tabular/trainer/abstract_trainer.py,sha256=
+autogluon/tabular/trainer/abstract_trainer.py,sha256=lqOjVTLUaZNue4B7u47PYXTXsBEFbSJ4SyruNeChFCk,231925
 autogluon/tabular/trainer/auto_trainer.py,sha256=FyRWM8iUJuDvw_aqV5EV_xdh_pb-nHzAvG1sbEhvs0g,8680
 autogluon/tabular/trainer/model_presets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autogluon/tabular/trainer/model_presets/presets.py,sha256=
+autogluon/tabular/trainer/model_presets/presets.py,sha256=bTPGPyz07a7GG6327yO6ryuWbNc1aq3hF1qzZL-Xe4c,16733
 autogluon/tabular/trainer/model_presets/presets_distill.py,sha256=MnFC2GJc6RmDBNAGbsO2XMfo3PjR8cUrZoilWW8gTYQ,3295
 autogluon/tabular/tuning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/tabular/tuning/feature_pruner.py,sha256=9iNku8gVbYEkjuKlyITPJDicsNkoraaQOlINQq9iZlQ,6877
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
-autogluon.tabular-1.2.
+autogluon.tabular-1.2.1b20250227.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.tabular-1.2.1b20250227.dist-info/METADATA,sha256=nw0R8W2F-rUL_zr6eevigQGHEmmFKBhH2TGfPHvjuUE,14386
+autogluon.tabular-1.2.1b20250227.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.tabular-1.2.1b20250227.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+autogluon.tabular-1.2.1b20250227.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.tabular-1.2.1b20250227.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.tabular-1.2.1b20250227.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.tabular-1.2.1b20250227.dist-info/RECORD,,
/autogluon.tabular-1.2.1b20250225-py3.9-nspkg.pth → /autogluon.tabular-1.2.1b20250227-py3.9-nspkg.pth
RENAMED
File without changes

{autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/LICENSE
RENAMED
File without changes

{autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/NOTICE
RENAMED
File without changes

{autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/WHEEL
RENAMED
File without changes

{autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/namespace_packages.txt
RENAMED
File without changes

{autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/top_level.txt
RENAMED
File without changes

{autogluon.tabular-1.2.1b20250225.dist-info → autogluon.tabular-1.2.1b20250227.dist-info}/zip-safe
RENAMED
File without changes