autogluon.tabular 1.4.1b20251014__py3-none-any.whl → 1.4.1b20251214__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- autogluon/tabular/models/catboost/catboost_model.py +1 -4
- autogluon/tabular/models/ebm/ebm_model.py +2 -6
- autogluon/tabular/models/fastainn/tabular_nn_fastai.py +4 -2
- autogluon/tabular/models/lgb/lgb_model.py +2 -9
- autogluon/tabular/models/lr/lr_model.py +2 -4
- autogluon/tabular/models/lr/lr_preprocessing_utils.py +6 -7
- autogluon/tabular/models/mitra/mitra_model.py +2 -7
- autogluon/tabular/models/realmlp/realmlp_model.py +1 -4
- autogluon/tabular/models/rf/rf_model.py +6 -8
- autogluon/tabular/models/tabicl/tabicl_model.py +1 -4
- autogluon/tabular/models/tabm/tabm_model.py +76 -3
- autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py +7 -5
- autogluon/tabular/models/tabpfnv2/tabpfnv2_model.py +1 -4
- autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py +2 -5
- autogluon/tabular/models/xgboost/xgboost_model.py +1 -4
- autogluon/tabular/predictor/predictor.py +3 -2
- autogluon/tabular/testing/fit_helper.py +28 -0
- autogluon/tabular/version.py +1 -1
- autogluon.tabular-1.4.1b20251214-py3.11-nspkg.pth +1 -0
- {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/METADATA +90 -81
- {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/RECORD +27 -27
- {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/WHEEL +1 -1
- autogluon.tabular-1.4.1b20251014-py3.9-nspkg.pth +0 -1
- {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info/licenses}/LICENSE +0 -0
- {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info/licenses}/NOTICE +0 -0
- {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/namespace_packages.txt +0 -0
- {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/top_level.txt +0 -0
- {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/zip-safe +0 -0
autogluon/tabular/models/catboost/catboost_model.py
CHANGED

@@ -39,6 +39,7 @@ class CatBoostModel(AbstractModel):
     ag_priority_by_problem_type = MappingProxyType({
         SOFTCLASS: 60
     })
+    seed_name = "random_seed"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -116,9 +117,6 @@ class CatBoostModel(AbstractModel):
         approx_mem_size_req = data_mem_usage_bytes + histogram_mem_usage_bytes + baseline_memory_bytes
         return approx_mem_size_req

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("random_seed", "N/A")
-
     # TODO: Use Pool in preprocess, optimize bagging to do Pool.split() to avoid re-computing pool for each fold! Requires stateful + y
     # Pool is much more memory efficient, avoids copying data twice in memory
     def _fit(self, X, y, X_val=None, y_val=None, time_limit=None, num_gpus=0, num_cpus=-1, sample_weight=None, sample_weight_val=None, **kwargs):
@@ -128,7 +126,6 @@ class CatBoostModel(AbstractModel):

         ag_params = self._get_ag_params()
         params = self._get_model_params()
-        params["random_seed"] = self.random_seed

         params["thread_count"] = num_cpus
         if self.problem_type == SOFTCLASS:

autogluon/tabular/models/ebm/ebm_model.py
CHANGED

@@ -56,6 +56,7 @@ class EBMModel(AbstractModel):
     ag_key = "EBM"
     ag_name = "EBM"
     ag_priority = 35
+    seed_name = "random_state"

     def _fit(
         self,
@@ -89,7 +90,7 @@ class EBMModel(AbstractModel):

         # Init Class
         model_cls = get_class_from_problem_type(self.problem_type)
-        self.model = model_cls(
+        self.model = model_cls(**params)

         # Handle validation data format for EBM
         fit_X = X
@@ -112,11 +113,6 @@ class EBMModel(AbstractModel):
         )
         self.model.fit(fit_X, fit_y, sample_weight=fit_sample_weight, bags=bags)

-    def _get_random_seed_from_hyperparameters(
-        self, hyperparameters: dict
-    ) -> int | None | str:
-        return hyperparameters.get("random_state", "N/A")
-
     def _set_default_params(self):
         default_params = get_param_baseline(problem_type=self.problem_type, num_classes=self.num_classes)
         for param, val in default_params.items():

autogluon/tabular/models/fastainn/tabular_nn_fastai.py
CHANGED

@@ -103,6 +103,7 @@ class NNFastAiTabularModel(AbstractModel):
     ag_priority_by_problem_type = MappingProxyType({
         MULTICLASS: 95,
     })
+    seed_name = "random_seed"

     model_internals_file_name = "model-internals.pkl"

@@ -322,8 +323,9 @@ class NNFastAiTabularModel(AbstractModel):
         # Make deterministic
         from fastai.torch_core import set_seed

-
-
+        random_seed = params.pop(self.seed_name, self.default_random_seed)
+        set_seed(random_seed, True)
+        dls.rng.seed(random_seed)

         if self.problem_type == QUANTILE:
             dls.c = len(self.quantile_levels)

autogluon/tabular/models/lgb/lgb_model.py
CHANGED

@@ -46,6 +46,8 @@ class LGBModel(AbstractModel):
     ag_priority_by_problem_type = MappingProxyType({
         SOFTCLASS: 100
     })
+    seed_name = "seed"
+    seed_name_alt = ["seed_value", "random_seed", "random_state"]

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -128,13 +130,6 @@ class LGBModel(AbstractModel):
         approx_mem_size_req = data_mem_usage_bytes + histogram_mem_usage_bytes + mem_size_estimators
         return approx_mem_size_req

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        if "seed_value" in hyperparameters:
-            return hyperparameters["seed_value"]
-        if "seed" in hyperparameters:
-            return hyperparameters["seed"]
-        return "N/A"
-
     def _fit(self, X, y, X_val=None, y_val=None, time_limit=None, num_gpus=0, num_cpus=0, sample_weight=None, sample_weight_val=None, verbosity=2, **kwargs):
         try_import_lightgbm()  # raise helpful error message if LightGBM isn't installed
         start_time = time.time()
@@ -292,8 +287,6 @@ class LGBModel(AbstractModel):
         elif self.problem_type == QUANTILE:
             train_params["params"]["quantile_levels"] = self.quantile_levels

-        train_params["params"]["seed"] = self.random_seed
-
         # Train LightGBM model:
         # Note that self.model contains a <class 'lightgbm.basic.Booster'> not a LightBGMClassifier or LightGBMRegressor object
         from lightgbm.basic import LightGBMError

autogluon/tabular/models/lr/lr_model.py
CHANGED

@@ -43,6 +43,7 @@ class LinearModel(AbstractModel):
     ag_key = "LR"
     ag_name = "LinearModel"
     ag_priority = 30
+    seed_name = "random_state"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -162,9 +163,6 @@ class LinearModel(AbstractModel):
         for param, val in default_params.items():
             self._set_default_param_value(param, val)

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("random_seed", "N/A")
-
     def _get_default_searchspace(self):
         return get_default_searchspace(self.problem_type)

@@ -218,7 +216,7 @@ class LinearModel(AbstractModel):
         total_iter = 0
         total_iter_used = 0
         total_max_iter = sum(max_iter_list)
-        model = model_cls(max_iter=max_iter_list[0],
+        model = model_cls(max_iter=max_iter_list[0], **params)
         early_stop = False
         for i, cur_max_iter in enumerate(max_iter_list):
             if time_left is not None and (i > 0):

autogluon/tabular/models/lr/lr_preprocessing_utils.py
CHANGED

@@ -5,20 +5,19 @@ from autogluon.features.generators import OneHotEncoderFeatureGenerator

 class OheFeaturesGenerator(BaseEstimator, TransformerMixin):
     def __init__(self):
-
-        self._encoder = None
+        pass

     def fit(self, X, y=None):
-        self.
-        self.
-        self.
+        self.encoder_ = OneHotEncoderFeatureGenerator(max_levels=10000, verbosity=0)
+        self.encoder_.fit(X)
+        self.feature_names_ = self.encoder_.features_out
         return self

     def transform(self, X, y=None):
-        return self.
+        return self.encoder_.transform_ohe(X)

     def get_feature_names(self):
-        return self.
+        return self.feature_names_


 class NlpDataPreprocessor(BaseEstimator, TransformerMixin):

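The rewritten OheFeaturesGenerator above follows the usual scikit-learn convention: __init__ carries no fitted state, and everything learned in fit() is stored under trailing-underscore attributes (encoder_, feature_names_). A minimal standalone illustration of that convention, assuming only scikit-learn is installed; the CountColumns transformer below is a made-up example, not part of AutoGluon:

from sklearn.base import BaseEstimator, TransformerMixin

class CountColumns(BaseEstimator, TransformerMixin):
    def __init__(self):
        pass  # no fitted state is created in __init__

    def fit(self, X, y=None):
        # attributes learned from data get a trailing underscore
        self.n_features_in_ = len(X[0])
        return self

    def transform(self, X, y=None):
        # fitted state is only read after fit() has run
        assert all(len(row) == self.n_features_in_ for row in X)
        return X

print(CountColumns().fit([[1, 2]]).transform([[3, 4]]))  # -> [[3, 4]]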
autogluon/tabular/models/mitra/mitra_model.py
CHANGED

@@ -32,6 +32,7 @@ class MitraModel(AbstractModel):
     ag_name = "Mitra"
     weights_file_name = "model.pt"
     ag_priority = 55
+    seed_name = "seed"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -77,9 +78,6 @@ class MitraModel(AbstractModel):

         return X

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("seed", "N/A")
-
     def _fit(
         self,
         X: pd.DataFrame,
@@ -157,10 +155,7 @@ class MitraModel(AbstractModel):
         if "verbose" not in hyp:
             hyp["verbose"] = verbosity >= 3

-        self.model = model_cls(
-            seed=self.random_seed,
-            **hyp,
-        )
+        self.model = model_cls(**hyp)

         X = self.preprocess(X, is_train=True)
         if X_val is not None:

autogluon/tabular/models/realmlp/realmlp_model.py
CHANGED

@@ -51,6 +51,7 @@ class RealMLPModel(AbstractModel):
     ag_key = "REALMLP"
     ag_name = "RealMLP"
     ag_priority = 75
+    seed_name = "random_state"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -82,9 +83,6 @@ class RealMLPModel(AbstractModel):
            model_cls = RealMLP_TD_S_Regressor
         return model_cls

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("random_state", "N/A")
-
     def _fit(
         self,
         X: pd.DataFrame,
@@ -178,7 +176,6 @@ class RealMLPModel(AbstractModel):
         self.model = model_cls(
             n_threads=num_cpus,
             device=device,
-            random_state=self.random_seed,
             **init_kwargs,
             **hyp,
         )

autogluon/tabular/models/rf/rf_model.py
CHANGED

@@ -30,6 +30,7 @@ class RFModel(AbstractModel):
     ag_key = "RF"
     ag_name = "RandomForest"
     ag_priority = 80
+    seed_name = "random_state"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -107,9 +108,6 @@ class RFModel(AbstractModel):
         for param, val in default_params.items():
             self._set_default_param_value(param, val)

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("random_state", "N/A")
-
     # TODO: Add in documentation that Categorical default is the first index
     # TODO: enable HPO for RF models
     def _get_default_searchspace(self):
@@ -153,13 +151,13 @@ class RFModel(AbstractModel):
             hyperparameters = {}
         n_estimators_final = hyperparameters.get("n_estimators", 300)
         if isinstance(n_estimators_final, int):
-
+            n_estimators = n_estimators_final
         else:  # if search space
-
+            n_estimators = 40
         num_trees_per_estimator = cls._get_num_trees_per_estimator_static(problem_type=problem_type, num_classes=num_classes)
         bytes_per_estimator = num_trees_per_estimator * len(X) / 60000 * 1e6  # Underestimates by 3x on ExtraTrees
-
-        return
+        expected_memory_usage = int(bytes_per_estimator * n_estimators)
+        return expected_memory_usage

     def _validate_fit_memory_usage(self, mem_error_threshold: float = 0.5, mem_warning_threshold: float = 0.4, mem_size_threshold: int = 1e7, **kwargs):
         return super()._validate_fit_memory_usage(
@@ -208,7 +206,7 @@ class RFModel(AbstractModel):
         # FIXME: This is inefficient but sklearnex doesn't support computing oob_score after training
         params["oob_score"] = True

-        model = model_cls(
+        model = model_cls(**params)

         time_train_start = time.time()
         for i, n_estimators in enumerate(n_estimator_increments):

autogluon/tabular/models/tabicl/tabicl_model.py
CHANGED

@@ -35,6 +35,7 @@ class TabICLModel(AbstractModel):
     ag_key = "TABICL"
     ag_name = "TabICL"
     ag_priority = 65
+    seed_name = "random_state"

     def get_model_cls(self):
         from tabicl import TabICLClassifier
@@ -89,7 +90,6 @@ class TabICLModel(AbstractModel):
             **hyp,
             device=device,
             n_jobs=num_cpus,
-            random_state=self.random_seed,
         )
         X = self.preprocess(X)
         self.model = self.model.fit(
@@ -97,9 +97,6 @@ class TabICLModel(AbstractModel):
             y=y,
         )

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("random_state", "N/A")
-
     def _get_default_auxiliary_params(self) -> dict:
         default_auxiliary_params = super()._get_default_auxiliary_params()
         default_auxiliary_params.update(

autogluon/tabular/models/tabm/tabm_model.py
CHANGED

@@ -39,6 +39,7 @@ class TabMModel(AbstractModel):
     ag_key = "TABM"
     ag_name = "TabM"
     ag_priority = 85
+    seed_name = "random_state"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -48,6 +49,7 @@ class TabMModel(AbstractModel):
         self._indicator_columns = None
         self._features_bool = None
         self._bool_to_cat = None
+        self.device = None

     def _fit(
         self,
@@ -106,7 +108,6 @@ class TabMModel(AbstractModel):
             device=device,
             problem_type=self.problem_type,
             early_stopping_metric=self.stopping_metric,
-            random_state=self.random_seed,
             **hyp,
         )

@@ -142,8 +143,80 @@ class TabMModel(AbstractModel):

         return X

-    def
-
+    def save(self, path: str = None, verbose=True) -> str:
+        """
+        Need to set device to CPU to be able to load on a non-GPU environment
+        """
+        import torch
+
+        # Save on CPU to ensure the model can be loaded without GPU
+        if self.model is not None:
+            self.device = self.model.device_
+            device_cpu = torch.device("cpu")
+            self.model.model_ = self.model.model_.to(device_cpu)
+            self.model.device_ = device_cpu
+        path = super().save(path=path, verbose=verbose)
+        # Put the model back to the device after the save
+        if self.model is not None:
+            self.model.model_.to(self.device)
+            self.model.device_ = self.device
+
+        return path
+
+    @classmethod
+    def load(cls, path: str, reset_paths=True, verbose=True):
+        """
+        Loads the model from disk to memory.
+        The loaded model will be on the same device it was trained on (cuda/mps);
+        if the device is not available (trained on GPU, deployed on CPU), then `cpu` will be used.
+
+        Parameters
+        ----------
+        path : str
+            Path to the saved model, minus the file name.
+            This should generally be a directory path ending with a '/' character (or appropriate path separator value depending on OS).
+            The model file is typically located in os.path.join(path, cls.model_file_name).
+        reset_paths : bool, default True
+            Whether to reset the self.path value of the loaded model to be equal to path.
+            It is highly recommended to keep this value as True unless accessing the original self.path value is important.
+            If False, the actual valid path and self.path may differ, leading to strange behaviour and potential exceptions if the model needs to load any other files at a later time.
+        verbose : bool, default True
+            Whether to log the location of the loaded file.
+
+        Returns
+        -------
+        model : cls
+            Loaded model object.
+        """
+        import torch
+
+        model: TabMModel = super().load(path=path, reset_paths=reset_paths, verbose=verbose)
+
+        # Put the model on the same device it was trained on (GPU/MPS) if it is available; otherwise use CPU
+        if model.model is not None:
+            original_device_type = model.device.type
+            if "cuda" in original_device_type:
+                # cuda: nvidia GPU
+                device = torch.device(original_device_type if torch.cuda.is_available() else "cpu")
+            elif "mps" in original_device_type:
+                # mps: Apple Silicon
+                device = torch.device(original_device_type if torch.backends.mps.is_available() else "cpu")
+            else:
+                device = torch.device(original_device_type)
+
+            if verbose and (original_device_type != device.type):
+                logger.log(15, f"Model is trained on {original_device_type}, but the device is not available - loading on {device.type}")
+
+            model.set_device(device=device)
+
+        return model
+
+    def set_device(self, device):
+        self.device = device
+        if self.model is not None:
+            self.model.device_ = device
+            if self.model.model_ is not None:
+                self.model.model_ = self.model.model_.to(device)

     @classmethod
     def supported_problem_types(cls) -> list[str] | None:

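The TabM change above adds a save/load round-trip that parks the fitted torch module on the CPU before serialization and moves it back to the best available device on load. A rough standalone sketch of that pattern, assuming only torch is installed; the nn.Linear model and file path are placeholders, not AutoGluon's TabM wrapper:

import torch
import torch.nn as nn

def save_on_cpu(model: nn.Module, path: str) -> None:
    original_device = next(model.parameters()).device
    model.to("cpu")                       # make the checkpoint loadable without a GPU
    torch.save(model.state_dict(), path)
    model.to(original_device)             # restore the original device afterwards

def load_on_best_device(model: nn.Module, path: str) -> nn.Module:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.load_state_dict(torch.load(path, map_location=device))
    return model.to(device)

if __name__ == "__main__":
    net = nn.Linear(4, 2)
    save_on_cpu(net, "net.pt")
    net = load_on_best_device(nn.Linear(4, 2), "net.pt")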
autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py
CHANGED

@@ -42,6 +42,7 @@ class TabPFNMixModel(AbstractModel):
     ag_key = "TABPFNMIX"
     ag_name = "TabPFNMix"
     ag_priority = 45
+    seed_name = "random_state"

     weights_file_name = "model.pt"

@@ -123,6 +124,7 @@ class TabPFNMixModel(AbstractModel):
             raise AssertionError(f"Max allowed classes for the model is {max_classes}, " f"but found {self.num_classes} classes.")

         params = self._get_model_params()
+        random_state = params.pop(self.seed_name, self.default_random_seed)
         sample_rows = ag_params.get("sample_rows", None)
         sample_rows_val = ag_params.get("sample_rows_val", None)
         max_rows = ag_params.get("max_rows", None)
@@ -133,11 +135,11 @@ class TabPFNMixModel(AbstractModel):

         # TODO: Make sample_rows generic
         if sample_rows is not None and isinstance(sample_rows, int) and len(X) > sample_rows:
-            X, y = self._subsample_data(X=X, y=y, num_rows=sample_rows)
+            X, y = self._subsample_data(X=X, y=y, num_rows=sample_rows, random_state=random_state)

         # TODO: Make sample_rows generic
         if X_val is not None and y_val is not None and sample_rows_val is not None and isinstance(sample_rows_val, int) and len(X_val) > sample_rows_val:
-            X_val, y_val = self._subsample_data(X=X_val, y=y_val, num_rows=sample_rows_val)
+            X_val, y_val = self._subsample_data(X=X_val, y=y_val, num_rows=sample_rows_val, random_state=random_state)

         from ._internal.core.enums import Task
         if self.problem_type in [REGRESSION, QUANTILE]:
@@ -178,7 +180,7 @@ class TabPFNMixModel(AbstractModel):
         elif weights_path is not None:
             logger.log(15, f'\tLoading pre-trained weights from file... (weights_path="{weights_path}")')

-        cfg = ConfigRun(hyperparams=params, task=task, device=device, seed=
+        cfg = ConfigRun(hyperparams=params, task=task, device=device, seed=random_state)

         if cfg.hyperparams["max_epochs"] == 0 and cfg.hyperparams["n_ensembles"] != 1:
             logger.log(
@@ -242,14 +244,14 @@ class TabPFNMixModel(AbstractModel):
         return self

     # TODO: Make this generic by creating a generic `preprocess_train` and putting this logic prior to `_preprocess`.
-    def _subsample_data(self, X: pd.DataFrame, y: pd.Series, num_rows: int) -> (pd.DataFrame, pd.Series):
+    def _subsample_data(self, X: pd.DataFrame, y: pd.Series, num_rows: int, random_state: int | None = 0) -> (pd.DataFrame, pd.Series):
         num_rows_to_drop = len(X) - num_rows
         X, _, y, _ = generate_train_test_split(
             X=X,
             y=y,
             problem_type=self.problem_type,
             test_size=num_rows_to_drop,
-            random_state=
+            random_state=random_state,
             min_cls_count_train=1,
         )
         return X, y

autogluon/tabular/models/tabpfnv2/tabpfnv2_model.py
CHANGED

@@ -122,6 +122,7 @@ class TabPFNV2Model(AbstractModel):
     ag_key = "TABPFNV2"
     ag_name = "TabPFNv2"
     ag_priority = 105
+    seed_name = "random_state"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -201,7 +202,6 @@ class TabPFNV2Model(AbstractModel):
         X = self.preprocess(X, is_train=True)

         hps = self._get_model_params()
-        hps["random_state"] = self.random_seed
         hps["device"] = device
         hps["n_jobs"] = num_cpus
         hps["categorical_features_indices"] = self._cat_indices
@@ -306,9 +306,6 @@ class TabPFNV2Model(AbstractModel):
         for param, val in default_params.items():
             self._set_default_param_value(param, val)

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("random_state", "N/A")
-
     @classmethod
     def supported_problem_types(cls) -> list[str] | None:
         return ["binary", "multiclass", "regression"]

autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py
CHANGED

@@ -50,6 +50,7 @@ class TabularNeuralNetTorchModel(AbstractNeuralNetworkModel):
     ag_key = "NN_TORCH"
     ag_name = "NeuralNetTorch"
     ag_priority = 25
+    seed_name = "seed_value"

     # Constants used throughout this class:
     unique_category_str = np.nan  # string used to represent missing values and unknown categories for categorical features.
@@ -164,9 +165,6 @@ class TabularNeuralNetTorchModel(AbstractNeuralNetworkModel):

         return processor_kwargs, optimizer_kwargs, fit_kwargs, loss_kwargs, params

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("seed_value", "N/A")
-
     def _fit(
         self,
         X: pd.DataFrame,
@@ -194,7 +192,7 @@ class TabularNeuralNetTorchModel(AbstractNeuralNetworkModel):

         processor_kwargs, optimizer_kwargs, fit_kwargs, loss_kwargs, params = self._prepare_params(params=params)

-        seed_value = self.
+        seed_value = params.pop(self.seed_name, self.default_random_seed)

         self._num_cpus_infer = params.pop("_num_cpus_infer", 1)
         if seed_value is not None:  # Set seeds
@@ -373,7 +371,6 @@ class TabularNeuralNetTorchModel(AbstractNeuralNetworkModel):
         best_epoch = 0
         best_val_metric = -np.inf  # higher = better
         best_val_update = 0
-        val_improve_epoch = 0  # most recent epoch where validation-score strictly improved
         start_fit_time = time.time()
         if time_limit is not None:
             time_limit = time_limit - (start_fit_time - start_time)

autogluon/tabular/models/xgboost/xgboost_model.py
CHANGED

@@ -32,6 +32,7 @@ class XGBoostModel(AbstractModel):
     ag_key = "XGB"
     ag_name = "XGBoost"
     ag_priority = 40
+    seed_name = "seed"

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -75,15 +76,11 @@ class XGBoostModel(AbstractModel):

         return X

-    def _get_random_seed_from_hyperparameters(self, hyperparameters: dict) -> int | None | str:
-        return hyperparameters.get("seed", "N/A")
-
     def _fit(self, X, y, X_val=None, y_val=None, time_limit=None, num_gpus=0, num_cpus=None, sample_weight=None, sample_weight_val=None, verbosity=2, **kwargs):
         # TODO: utilize sample_weight_val in early-stopping if provided
         start_time = time.time()
         ag_params = self._get_ag_params()
         params = self._get_model_params()
-        params["seed"] = self.random_seed
         generate_curves = ag_params.get("generate_curves", False)

         if generate_curves:

autogluon/tabular/predictor/predictor.py
CHANGED

@@ -20,6 +20,7 @@ from autogluon.common import FeatureMetadata, TabularDataset
 from autogluon.common.loaders import load_json
 from autogluon.common.savers import save_json
 from autogluon.common.utils.file_utils import get_directory_size, get_directory_size_per_file
+from autogluon.common.utils.resource_utils import ResourceManager, get_resource_manager
 from autogluon.common.utils.hyperparameter_utils import get_hyperparameter_str_deprecation_msg, is_advanced_hyperparameter_format
 from autogluon.common.utils.log_utils import add_log_to_file, set_logger_verbosity, warn_if_mlflow_autologging_is_enabled
 from autogluon.common.utils.pandas_utils import get_approximate_df_mem_usage
@@ -1091,7 +1092,8 @@ class TabularPredictor:
         elif verbosity >= 4:
             logger.log(20, f"Verbosity: {verbosity} (Maximum Logging)")

-
+        resource_manager: ResourceManager = get_resource_manager()
+        include_gpu_count = resource_manager.get_gpu_count_torch() or verbosity >= 3
         sys_msg = get_ag_system_info(path=self.path, include_gpu_count=include_gpu_count)
         logger.log(20, sys_msg)

@@ -1630,7 +1632,6 @@ class TabularPredictor:
         if _ds_ray is not None:
             # Handle resources
             # FIXME: what about distributed?
-            from autogluon.common.utils.resource_utils import ResourceManager

             total_resources = ag_fit_kwargs["core_kwargs"]["total_resources"]

autogluon/tabular/testing/fit_helper.py
CHANGED

@@ -175,6 +175,7 @@ class FitHelper:
         use_test_for_val: bool = False,
         raise_on_model_failure: bool | None = None,
         deepcopy_fit_args: bool = True,
+        verify_model_seed: bool = False,
     ) -> TabularPredictor:
         if compiler_configs is None:
             compiler_configs = {}
@@ -269,6 +270,11 @@ class FitHelper:
                 assert not model_info["val_in_fit"], f"val data must not be present in refit model if `can_refit_full=True`. Maybe an exception occurred?"
             else:
                 assert model_info["val_in_fit"], f"val data must be present in refit model if `can_refit_full=False`"
+        if verify_model_seed:
+            model_names = predictor.model_names()
+            for model_name in model_names:
+                model = predictor._trainer.load_model(model_name)
+                _verify_model_seed(model=model)

         if predictor_info:
             predictor.info()
@@ -339,6 +345,7 @@ class FitHelper:
         require_known_problem_types: bool = True,
         raise_on_model_failure: bool = True,
         problem_types: list[str] | None = None,
+        verify_model_seed: bool = True,
         **kwargs,
     ):
         """
@@ -355,12 +362,18 @@ class FitHelper:
         problem_types: list[str], optional
             If specified, checks the given problem_types.
            If None, checks `model_cls.supported_problem_types()`
+        verify_model_seed: bool = True
         **kwargs

         Returns
         -------

         """
+        if verify_model_seed and model_cls.seed_name is not None:
+            # verify that the seed logic works
+            model_hyperparameters = model_hyperparameters.copy()
+            model_hyperparameters[model_cls.seed_name] = 42
+
         fit_args = dict(
             hyperparameters={model_cls: model_hyperparameters},
         )
@@ -429,6 +442,7 @@ class FitHelper:
             refit_full=refit_full,
             extra_metrics=_extra_metrics,
             raise_on_model_failure=raise_on_model_failure,
+            verify_model_seed=verify_model_seed,
             **kwargs,
         )

@@ -460,6 +474,7 @@ class FitHelper:
             refit_full=refit_full,
             extra_metrics=_extra_metrics,
             raise_on_model_failure=raise_on_model_failure,
+            verify_model_seed=verify_model_seed,
             **kwargs,
         )

@@ -476,3 +491,16 @@ def stacked_overfitting_assert(
     if expected_stacked_overfitting_at_test is not None:
         stacked_overfitting = check_stacked_overfitting_from_leaderboard(lb)
         assert stacked_overfitting == expected_stacked_overfitting_at_test, "Expected stacked overfitting at test mismatch!"
+
+
+def _verify_model_seed(model: AbstractModel):
+    assert model.random_seed is None or isinstance(model.random_seed, int)
+    if model.seed_name is not None:
+        if model.seed_name in model._user_params:
+            assert model.random_seed == model._user_params[model.seed_name]
+        assert model.seed_name in model.params
+        assert model.random_seed == model.params[model.seed_name]
+    if isinstance(model, BaggedEnsembleModel):
+        for child in model.models:
+            child = model.load_child(child)
+            _verify_model_seed(child)

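Taken together, the model-file changes above drop the per-model _get_random_seed_from_hyperparameters overrides in favor of a class-level seed_name attribute: the seed now travels as an ordinary hyperparameter under each framework's native key and is popped in _fit, which is what the new _verify_model_seed test helper checks. A rough standalone sketch of that pattern, under the assumption that the base class resolves the seed once at construction time; the BaseModel glue below is illustrative and is not AutoGluon's actual AbstractModel:

class BaseModel:
    seed_name: str | None = None          # hyperparameter key that carries the seed
    default_random_seed: int = 0          # fallback when the user sets nothing

    def __init__(self, hyperparameters: dict | None = None):
        self.params = dict(hyperparameters or {})
        # Resolve the seed once and expose it both as an attribute and as a
        # regular hyperparameter, so downstream code reads it from self.params.
        if self.seed_name is not None:
            self.random_seed = self.params.setdefault(self.seed_name, self.default_random_seed)
        else:
            self.random_seed = None


class LGBLikeModel(BaseModel):
    seed_name = "seed"                    # LightGBM's native name for the seed

    def fit(self):
        params = dict(self.params)
        seed = params.pop(self.seed_name, self.default_random_seed)
        print(f"fitting with seed={seed}, params={params}")


if __name__ == "__main__":
    LGBLikeModel({"learning_rate": 0.05}).fit()   # uses the default seed
    LGBLikeModel({"seed": 42}).fit()              # user-provided seed wins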
autogluon/tabular/version.py
CHANGED

autogluon.tabular-1.4.1b20251214-py3.11-nspkg.pth
ADDED

@@ -0,0 +1 @@
+import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('autogluon',));importlib = __import__('importlib.util');__import__('importlib.machinery');m = sys.modules.setdefault('autogluon', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('autogluon', [os.path.dirname(p)])));m = m or sys.modules.setdefault('autogluon', types.ModuleType('autogluon'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

{autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/METADATA
RENAMED

@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: autogluon.tabular
-Version: 1.4.
+Version: 1.4.1b20251214
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -9,7 +9,6 @@ Project-URL: Documentation, https://auto.gluon.ai
 Project-URL: Bug Reports, https://github.com/autogluon/autogluon/issues
 Project-URL: Source, https://github.com/autogluon/autogluon/
 Project-URL: Contribute!, https://github.com/autogluon/autogluon/blob/master/CONTRIBUTING.md
-Platform: UNKNOWN
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Education
 Classifier: Intended Audience :: Developers
@@ -24,121 +23,130 @@ Classifier: Operating System :: Microsoft :: Windows
 Classifier: Operating System :: POSIX
 Classifier: Operating System :: Unix
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Software Development
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Scientific/Engineering :: Information Analysis
 Classifier: Topic :: Scientific/Engineering :: Image Recognition
-Requires-Python: >=3.
+Requires-Python: >=3.10, <3.14
 Description-Content-Type: text/markdown
-License-File:
-License-File:
+License-File: LICENSE
+License-File: NOTICE
 Requires-Dist: numpy<2.4.0,>=1.25.0
 Requires-Dist: scipy<1.17,>=1.5.4
 Requires-Dist: pandas<2.4.0,>=2.0.0
 Requires-Dist: scikit-learn<1.8.0,>=1.4.0
 Requires-Dist: networkx<4,>=3.0
-Requires-Dist: autogluon.core==1.4.
-Requires-Dist: autogluon.features==1.4.
-Provides-Extra:
-Requires-Dist:
-Requires-Dist: autogluon.core[all]==1.4.1b20251014; extra == "all"
-Requires-Dist: fastai<2.9,>=2.3.1; extra == "all"
-Requires-Dist: loguru; extra == "all"
-Requires-Dist: numpy<2.3.0,>=1.25; extra == "all"
-Requires-Dist: xgboost<3.1,>=2.0; extra == "all"
-Requires-Dist: spacy<3.9; extra == "all"
-Requires-Dist: einops<0.9,>=0.7; extra == "all"
-Requires-Dist: omegaconf; extra == "all"
-Requires-Dist: torch<2.8,>=2.6; extra == "all"
-Requires-Dist: huggingface-hub[torch]; extra == "all"
-Requires-Dist: einx; extra == "all"
-Requires-Dist: lightgbm<4.7,>=4.0; extra == "all"
-Requires-Dist: transformers; extra == "all"
-Requires-Dist: blis<1.2.1,>=0.7.0; (platform_system == "Windows" and python_version == "3.9") and extra == "all"
+Requires-Dist: autogluon.core==1.4.1b20251214
+Requires-Dist: autogluon.features==1.4.1b20251214
+Provides-Extra: lightgbm
+Requires-Dist: lightgbm<4.7,>=4.0; extra == "lightgbm"
 Provides-Extra: catboost
 Requires-Dist: numpy<2.3.0,>=1.25; extra == "catboost"
 Requires-Dist: catboost<1.3,>=1.2; extra == "catboost"
+Provides-Extra: xgboost
+Requires-Dist: xgboost<3.1,>=2.0; extra == "xgboost"
+Provides-Extra: realmlp
+Requires-Dist: pytabkit<1.7,>=1.6; extra == "realmlp"
+Provides-Extra: interpret
+Requires-Dist: interpret-core<0.8,>=0.7.2; extra == "interpret"
 Provides-Extra: fastai
 Requires-Dist: spacy<3.9; extra == "fastai"
-Requires-Dist: torch<2.
+Requires-Dist: torch<2.10,>=2.6; extra == "fastai"
 Requires-Dist: fastai<2.9,>=2.3.1; extra == "fastai"
-
-
-
-
-
-
-Requires-Dist:
+Provides-Extra: tabm
+Requires-Dist: torch<2.10,>=2.6; extra == "tabm"
+Provides-Extra: tabpfn
+Requires-Dist: tabpfn<2.2,>=2.0.9; extra == "tabpfn"
+Provides-Extra: tabpfnmix
+Requires-Dist: torch<2.10,>=2.6; extra == "tabpfnmix"
+Requires-Dist: huggingface_hub[torch]<1.0; extra == "tabpfnmix"
+Requires-Dist: einops<0.9,>=0.7; extra == "tabpfnmix"
 Provides-Extra: mitra
 Requires-Dist: loguru; extra == "mitra"
 Requires-Dist: einx; extra == "mitra"
 Requires-Dist: omegaconf; extra == "mitra"
-Requires-Dist: torch<2.
+Requires-Dist: torch<2.10,>=2.6; extra == "mitra"
 Requires-Dist: transformers; extra == "mitra"
-Requires-Dist:
+Requires-Dist: huggingface_hub[torch]<1.0; extra == "mitra"
 Requires-Dist: einops<0.9,>=0.7; extra == "mitra"
+Provides-Extra: tabicl
+Requires-Dist: tabicl<0.2,>=0.1.3; extra == "tabicl"
 Provides-Extra: ray
-Requires-Dist: autogluon.core[all]==1.4.
-Provides-Extra: realmlp
-Requires-Dist: pytabkit<1.7,>=1.6; extra == "realmlp"
+Requires-Dist: autogluon.core[all]==1.4.1b20251214; extra == "ray"
 Provides-Extra: skex
 Requires-Dist: scikit-learn-intelex<2025.5,>=2024.0; extra == "skex"
+Provides-Extra: imodels
+Requires-Dist: imodels<2.1.0,>=1.3.10; extra == "imodels"
 Provides-Extra: skl2onnx
-Requires-Dist: skl2onnx<1.
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
+Requires-Dist: skl2onnx<1.20.0,>=1.15.0; extra == "skl2onnx"
+Requires-Dist: onnx!=1.16.2,<1.21.0,>=1.13.0; platform_system == "Windows" and extra == "skl2onnx"
+Requires-Dist: onnx<1.21.0,>=1.13.0; platform_system != "Windows" and extra == "skl2onnx"
+Requires-Dist: onnxruntime<1.24.0,>=1.17.0; extra == "skl2onnx"
+Requires-Dist: onnxruntime-gpu<1.24.0,>=1.17.0; (platform_system != "Darwin" and platform_machine != "aarch64") and extra == "skl2onnx"
+Provides-Extra: all
+Requires-Dist: numpy<2.3.0,>=1.25; extra == "all"
+Requires-Dist: xgboost<3.1,>=2.0; extra == "all"
+Requires-Dist: huggingface_hub[torch]<1.0; extra == "all"
+Requires-Dist: omegaconf; extra == "all"
+Requires-Dist: einops<0.9,>=0.7; extra == "all"
+Requires-Dist: lightgbm<4.7,>=4.0; extra == "all"
+Requires-Dist: transformers; extra == "all"
+Requires-Dist: autogluon.core[all]==1.4.1b20251214; extra == "all"
+Requires-Dist: torch<2.10,>=2.6; extra == "all"
+Requires-Dist: spacy<3.9; extra == "all"
+Requires-Dist: einx; extra == "all"
+Requires-Dist: loguru; extra == "all"
+Requires-Dist: catboost<1.3,>=1.2; extra == "all"
+Requires-Dist: fastai<2.9,>=2.3.1; extra == "all"
 Provides-Extra: tabarena
+Requires-Dist: numpy<2.3.0,>=1.25; extra == "tabarena"
 Requires-Dist: xgboost<3.1,>=2.0; extra == "tabarena"
+Requires-Dist: huggingface_hub[torch]<1.0; extra == "tabarena"
+Requires-Dist: omegaconf; extra == "tabarena"
+Requires-Dist: einops<0.9,>=0.7; extra == "tabarena"
+Requires-Dist: tabpfn<2.2,>=2.0.9; extra == "tabarena"
 Requires-Dist: lightgbm<4.7,>=4.0; extra == "tabarena"
 Requires-Dist: transformers; extra == "tabarena"
-Requires-Dist:
-Requires-Dist:
+Requires-Dist: autogluon.core[all]==1.4.1b20251214; extra == "tabarena"
+Requires-Dist: interpret-core<0.8,>=0.7.2; extra == "tabarena"
+Requires-Dist: torch<2.10,>=2.6; extra == "tabarena"
 Requires-Dist: tabicl<0.2,>=0.1.3; extra == "tabarena"
-Requires-Dist: catboost<1.3,>=1.2; extra == "tabarena"
-Requires-Dist: loguru; extra == "tabarena"
-Requires-Dist: tabpfn<2.2,>=2.0.9; extra == "tabarena"
-Requires-Dist: omegaconf; extra == "tabarena"
 Requires-Dist: pytabkit<1.7,>=1.6; extra == "tabarena"
-Requires-Dist: autogluon.core[all]==1.4.1b20251014; extra == "tabarena"
-Requires-Dist: numpy<2.3.0,>=1.25; extra == "tabarena"
 Requires-Dist: spacy<3.9; extra == "tabarena"
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist: blis<1.2.1,>=0.7.0; (platform_system == "Windows" and python_version == "3.9") and extra == "tabarena"
-Provides-Extra: tabicl
-Requires-Dist: tabicl<0.2,>=0.1.3; extra == "tabicl"
-Provides-Extra: tabm
-Requires-Dist: torch<2.8,>=2.6; extra == "tabm"
-Provides-Extra: tabpfn
-Requires-Dist: tabpfn<2.2,>=2.0.9; extra == "tabpfn"
-Provides-Extra: tabpfnmix
-Requires-Dist: torch<2.8,>=2.6; extra == "tabpfnmix"
-Requires-Dist: huggingface-hub[torch]; extra == "tabpfnmix"
-Requires-Dist: einops<0.9,>=0.7; extra == "tabpfnmix"
+Requires-Dist: einx; extra == "tabarena"
+Requires-Dist: loguru; extra == "tabarena"
+Requires-Dist: catboost<1.3,>=1.2; extra == "tabarena"
+Requires-Dist: fastai<2.9,>=2.3.1; extra == "tabarena"
 Provides-Extra: tests
 Requires-Dist: interpret-core<0.8,>=0.7.2; extra == "tests"
 Requires-Dist: tabicl<0.2,>=0.1.3; extra == "tests"
 Requires-Dist: tabpfn<2.2,>=2.0.9; extra == "tests"
 Requires-Dist: pytabkit<1.7,>=1.6; extra == "tests"
-Requires-Dist: torch<2.
-Requires-Dist:
+Requires-Dist: torch<2.10,>=2.6; extra == "tests"
+Requires-Dist: huggingface_hub[torch]<1.0; extra == "tests"
 Requires-Dist: einops<0.9,>=0.7; extra == "tests"
 Requires-Dist: imodels<2.1.0,>=1.3.10; extra == "tests"
-Requires-Dist: skl2onnx<1.
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-
-
+Requires-Dist: skl2onnx<1.20.0,>=1.15.0; extra == "tests"
+Requires-Dist: onnx!=1.16.2,<1.21.0,>=1.13.0; platform_system == "Windows" and extra == "tests"
+Requires-Dist: onnx<1.21.0,>=1.13.0; platform_system != "Windows" and extra == "tests"
+Requires-Dist: onnxruntime<1.24.0,>=1.17.0; extra == "tests"
+Requires-Dist: onnxruntime-gpu<1.24.0,>=1.17.0; (platform_system != "Darwin" and platform_machine != "aarch64") and extra == "tests"
+Dynamic: author
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license
+Dynamic: license-file
+Dynamic: project-url
+Dynamic: provides-extra
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary



@@ -149,7 +157,7 @@ Requires-Dist: xgboost<3.1,>=2.0; extra == "xgboost"

 [](https://github.com/autogluon/autogluon/releases)
 [](https://anaconda.org/conda-forge/autogluon)
-[](https://pypi.org/project/autogluon/)
 [](https://pepy.tech/project/autogluon)
 [](./LICENSE)
 [](https://discord.gg/wjUmjqAc2N)
@@ -166,7 +174,7 @@ AutoGluon, developed by AWS AI, automates machine learning tasks enabling you to

 ## 💾 Installation

-AutoGluon is supported on Python 3.
+AutoGluon is supported on Python 3.10 - 3.13 and is available on Linux, MacOS, and Windows.

 You can install AutoGluon with:

@@ -189,8 +197,8 @@ predictions = predictor.predict("test.csv")
 | AutoGluon Task | Quickstart | API |
 |:--------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------:|
 | TabularPredictor | [](https://auto.gluon.ai/stable/tutorials/tabular/tabular-quick-start.html) | [](https://auto.gluon.ai/stable/api/autogluon.tabular.TabularPredictor.html) |
-| MultiModalPredictor | [](https://auto.gluon.ai/stable/tutorials/multimodal/multimodal_prediction/multimodal-quick-start.html) | [](https://auto.gluon.ai/stable/api/autogluon.multimodal.MultiModalPredictor.html) |
 | TimeSeriesPredictor | [](https://auto.gluon.ai/stable/tutorials/timeseries/forecasting-quick-start.html) | [](https://auto.gluon.ai/stable/api/autogluon.timeseries.TimeSeriesPredictor.html) |
+| MultiModalPredictor | [](https://auto.gluon.ai/stable/tutorials/multimodal/multimodal_prediction/multimodal-quick-start.html) | [](https://auto.gluon.ai/stable/api/autogluon.multimodal.MultiModalPredictor.html) |

 ## :mag: Resources

@@ -213,7 +221,10 @@ Below is a curated list of recent tutorials and talks on AutoGluon. A comprehens
 - [Benchmarking Multimodal AutoML for Tabular Data with Text Fields](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/file/9bf31c7ff062936a96d3c8bd1f8f2ff3-Paper-round2.pdf) (*NeurIPS*, 2021) ([BibTeX](CITING.md#autogluonmultimodal))
 - [XTab: Cross-table Pretraining for Tabular Transformers](https://proceedings.mlr.press/v202/zhu23k/zhu23k.pdf) (*ICML*, 2023)
 - [AutoGluon-TimeSeries: AutoML for Probabilistic Time Series Forecasting](https://arxiv.org/abs/2308.05566) (*AutoML Conf*, 2023) ([BibTeX](CITING.md#autogluontimeseries))
-- [TabRepo: A Large Scale Repository of Tabular Model Evaluations and its AutoML Applications](https://arxiv.org/pdf/2311.02971.pdf) (*
+- [TabRepo: A Large Scale Repository of Tabular Model Evaluations and its AutoML Applications](https://arxiv.org/pdf/2311.02971.pdf) (*AutoML Conf*, 2024)
+- [AutoGluon-Multimodal (AutoMM): Supercharging Multimodal AutoML with Foundation Models](https://arxiv.org/pdf/2404.16233) (*AutoML Conf*, 2024) ([BibTeX](CITING.md#autogluonmultimodal))
+- [Multi-layer Stack Ensembles for Time Series Forecasting](https://arxiv.org/abs/2511.15350) (*AutoML Conf*, 2025) ([BibTeX](CITING.md#autogluontimeseries))
+- [Chronos-2: From Univariate to Universal Forecasting](https://arxiv.org/abs/2510.15821) (*Arxiv*, 2025) ([BibTeX](CITING.md#autogluontimeseries))

 ### Articles
 - [AutoGluon-TimeSeries: Every Time Series Forecasting Model In One Library](https://towardsdatascience.com/autogluon-timeseries-every-time-series-forecasting-model-in-one-library-29a3bf6879db) (*Towards Data Science*, Jan 2024)
@@ -239,5 +250,3 @@ We are actively accepting code contributions to the AutoGluon project. If you ar
 ## :classical_building: License

 This library is licensed under the Apache 2.0 License.
-
-

{autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/RECORD
RENAMED
|
@@ -1,6 +1,6 @@
|
|
|
1
|
-
autogluon.tabular-1.4.
|
|
1
|
+
autogluon.tabular-1.4.1b20251214-py3.11-nspkg.pth,sha256=kAlKxjI5mE3Pwwqphu2maN5OBQk8W8ew70e_qbI1c6A,482
|
|
2
2
|
autogluon/tabular/__init__.py,sha256=2OXpJCvENRHubBTYNIPpHX93WWuFZzsJBtTZbNVHVas,400
|
|
3
|
-
autogluon/tabular/version.py,sha256=
|
|
3
|
+
autogluon/tabular/version.py,sha256=CYNrmn5KfprHt9fJxfHv56jQZ5Q-BPbHMgM1kWYZDD8,91
|
|
4
4
|
autogluon/tabular/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
5
5
|
autogluon/tabular/configs/config_helper.py,sha256=Rby5gRhuY5IlZWdKbtsmzbSt948B97qxwQ2f1MbH_38,21070
|
|
6
6
|
autogluon/tabular/configs/feature_generator_presets.py,sha256=EV5Ym8VW15q92MwOUpTi7wZFS2QooM51fLg3RdUsn-M,1223
|
|
@@ -27,14 +27,14 @@ autogluon/tabular/models/automm/automm_model.py,sha256=MoydDuPEd5atbUPlVDzWLTKLB
|
|
|
27
27
|
autogluon/tabular/models/automm/ft_transformer.py,sha256=X-IEi5uKme7SoRcHnPjGTByzrjCB85I7RpB0hS36TLQ,3897
|
|
28
28
|
autogluon/tabular/models/catboost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
29
29
|
autogluon/tabular/models/catboost/callbacks.py,sha256=QvyiynQoxjvfYaYwGNSF5N3gc_wqI9mi1nQiawL0EJ4,7194
|
|
30
|
-
autogluon/tabular/models/catboost/catboost_model.py,sha256=
|
|
30
|
+
autogluon/tabular/models/catboost/catboost_model.py,sha256=tAT_eklRJDARJsbS72-Nn8PxLmKgIvffzjjrTI1XMXM,18041
|
|
31
31
|
autogluon/tabular/models/catboost/catboost_softclass_utils.py,sha256=UiW0SUb3hFueW5qYtQn6Sbk7Wg7BWN4jqKWeFtbMvgU,3919
|
|
32
32
|
autogluon/tabular/models/catboost/catboost_utils.py,sha256=zJMIsbgyW_JH0eULhUeu_TWR0Qfmf34CnED7c7NvXBw,3899
|
|
33
33
|
autogluon/tabular/models/catboost/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
34
34
|
autogluon/tabular/models/catboost/hyperparameters/parameters.py,sha256=Hxi4mPTc2ML9GdpW0TalkDgtsYJLwpEcd-LiyLOsmlA,956
|
|
35
35
|
autogluon/tabular/models/catboost/hyperparameters/searchspaces.py,sha256=Oe86ixuvd1xJCdSHs2Oh5Ifx0501YJBsdyL2l9Z4nxM,1458
|
|
36
36
|
autogluon/tabular/models/ebm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
37
|
-
autogluon/tabular/models/ebm/ebm_model.py,sha256=
|
|
37
|
+
autogluon/tabular/models/ebm/ebm_model.py,sha256=PyocCEPxByB-E5gRCZitI5gsP6DVYlxmRx8bbZ31guA,8524
|
|
38
38
|
autogluon/tabular/models/ebm/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
39
39
|
autogluon/tabular/models/ebm/hyperparameters/parameters.py,sha256=IbDv3Ufx8CGHvejqSbAggZKlMq5X9k0Ggclm_DCoiII,1080
|
|
40
40
|
autogluon/tabular/models/ebm/hyperparameters/searchspaces.py,sha256=G6zgHERKt_KJlVfZ06tFKw2aOUuM7DdDyCm0s5RBXoc,2191
|
|
@@ -43,7 +43,7 @@ autogluon/tabular/models/fastainn/callbacks.py,sha256=3WvOEwqd1YAVInooKsFOTzAkCL
|
|
|
43
43
|
autogluon/tabular/models/fastainn/fastai_helpers.py,sha256=gGYzyrAFl8hi8GnsemZNLGZn5xr7cyJXdFl08PIlza4,1393
|
|
44
44
|
autogluon/tabular/models/fastainn/imports_helper.py,sha256=ICxA8ty47-oZu0Q9AjKCQe8uVi340Iu0NFruxvJPrbA,330
|
|
45
45
|
autogluon/tabular/models/fastainn/quantile_helpers.py,sha256=d89GKvSRBgOy9EqcDI83MK5sqPRxP6JJ3BmPLmKnB0o,1808
|
|
46
|
-
autogluon/tabular/models/fastainn/tabular_nn_fastai.py,sha256=
|
|
46
|
+
autogluon/tabular/models/fastainn/tabular_nn_fastai.py,sha256=FqT6xqhU2XoTWJ0yY_ZmT3JI6ranl63vpdPkn6JFbos,29666
|
|
47
47
|
autogluon/tabular/models/fastainn/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
48
48
|
autogluon/tabular/models/fastainn/hyperparameters/parameters.py,sha256=DkQwAZZ7CuODKoljr-yrkx-uFxBSPRxkKuvPdwO-UhQ,2069
|
|
49
49
|
autogluon/tabular/models/fastainn/hyperparameters/searchspaces.py,sha256=5qdknZDrHtdPdrhSqjamYQrCxvupXvlN3bVGEPgs48E,1660
|
|
@@ -62,20 +62,20 @@ autogluon/tabular/models/knn/knn_rapids_model.py,sha256=0FFApNZFH8nyrDqlBSUV7jO-
|
|
|
62
62
|
autogluon/tabular/models/knn/knn_utils.py,sha256=XU1cxVXp1BAoQnja2_KmSIn9_q9gZkjAya7-9b0uStk,7455
|
|
63
63
|
autogluon/tabular/models/lgb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
64
64
|
autogluon/tabular/models/lgb/callbacks.py,sha256=KJB1KmebA88qHT206KSfvm5NamGuv5lRzy7O9dOwW-M,12243
|
|
65
|
-
autogluon/tabular/models/lgb/lgb_model.py,sha256=
|
|
65
|
+
autogluon/tabular/models/lgb/lgb_model.py,sha256=kRIcBBIDMJ2inaZeJXO5uhAG0qUigwYseJoFQ7jzqQE,27415
|
|
66
66
|
autogluon/tabular/models/lgb/lgb_utils.py,sha256=jzTDTzP-z7gcBGZyy1_0YkyTOLbU5DLeRqtil4FCZPI,7382
|
|
67
67
|
autogluon/tabular/models/lgb/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
68
68
|
autogluon/tabular/models/lgb/hyperparameters/parameters.py,sha256=LLEQ-Ns3HElWBsFJx3ogRV7L6qw_nXlcl7EyO0C0fVQ,1336
|
|
69
69
|
autogluon/tabular/models/lgb/hyperparameters/searchspaces.py,sha256=tvNNR7niWz_B-PndYQXb6vVNABxSfBYRHj6ZVQJ1x2E,1930
|
|
70
70
|
autogluon/tabular/models/lr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
71
|
-
autogluon/tabular/models/lr/lr_model.py,sha256=
|
|
72
|
-
autogluon/tabular/models/lr/lr_preprocessing_utils.py,sha256=
|
|
71
|
+
autogluon/tabular/models/lr/lr_model.py,sha256=2A6e8Itw-PgjOLjVXeo8bJwFQuVSGYwJNVxhHxFQXlw,15732
|
|
72
|
+
autogluon/tabular/models/lr/lr_preprocessing_utils.py,sha256=tgb75V6zHfMJh8m9GDs5404ItdfwNakqykTk0qjBtFE,1045
|
|
73
73
|
autogluon/tabular/models/lr/lr_rapids_model.py,sha256=XIB1KCPPfBZMxTRC3Wc1Dsl5NTMQSM_m8Uc2igyTLX8,3939
|
|
74
74
|
autogluon/tabular/models/lr/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
75
75
|
autogluon/tabular/models/lr/hyperparameters/parameters.py,sha256=Hr5YC13zjbt3CfCbzGj8iXUIuDn-Q7FvDT2uSuiSVlM,1414
|
|
76
76
|
autogluon/tabular/models/lr/hyperparameters/searchspaces.py,sha256=Igywc-B6qJ9EBLdasrDhW-Ot5FGirIzbXLwv5HRe5Xo,276
|
|
77
77
|
autogluon/tabular/models/mitra/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
78
|
-
autogluon/tabular/models/mitra/mitra_model.py,sha256=
|
|
78
|
+
autogluon/tabular/models/mitra/mitra_model.py,sha256=TzjozU19zQLU09S2tM8Sfe7TiTBSDDjld-tVt5L1JGQ,13954
|
|
79
79
|
autogluon/tabular/models/mitra/sklearn_interface.py,sha256=vyg8kkmYKzEJRWiehEqEsgZeOCV20tnZAZaaaJkwDuA,17739
|
|
80
80
|
autogluon/tabular/models/mitra/_internal/__init__.py,sha256=dN2dz1pGMgQTFiSf9oYbyq23iJUxV8QNlOX3qw3KUO4,35
|
|
81
81
|
autogluon/tabular/models/mitra/_internal/config/__init__.py,sha256=Exu_Sx6-K-D5peDQ_TibsjZpqAALs2-9IXfq8hu1mwU,40
|
|
@@ -101,23 +101,23 @@ autogluon/tabular/models/mitra/_internal/models/tab2d.py,sha256=o_S572-nKrhwxmEF
autogluon/tabular/models/mitra/_internal/utils/__init__.py,sha256=0mhykAqjMmcEc8Y2od_DMPMk8f66LZHWM7qFdUrPddU,34
autogluon/tabular/models/mitra/_internal/utils/set_seed.py,sha256=UnXzYfhmfT_tNAofKtLkKpwB9b6HVf9cpI4mKvoBuNM,340
autogluon/tabular/models/realmlp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/tabular/models/realmlp/realmlp_model.py,sha256=
+ autogluon/tabular/models/realmlp/realmlp_model.py,sha256=3pe_yhOGW8cbX3KgNs25s3FP0P3FzVSAS-hd4jMFjDg,14573
autogluon/tabular/models/rf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/tabular/models/rf/rf_model.py,sha256=
+ autogluon/tabular/models/rf/rf_model.py,sha256=auvNHx0qD9Pz8rS6yNIuG9cHzFNquv8fOVS7FWZNIAw,21721
autogluon/tabular/models/rf/rf_quantile.py,sha256=2S8FE8po9lMnZaeKuVkzOUFOcdil46ZbFqm49OuvNZY,36460
autogluon/tabular/models/rf/rf_rapids_model.py,sha256=3s-8M11dzCl_2Lu5iB3H8YjHLgyP_SElrm_4w_HfmqY,2028
autogluon/tabular/models/rf/compilers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
autogluon/tabular/models/rf/compilers/native.py,sha256=HhaqQRkVuf9UEEJPsHcdYCmuWBMYtyqRwwB_N2qxG2M,1313
autogluon/tabular/models/rf/compilers/onnx.py,sha256=pvaZWdl2JJaE2pFU0mFugzhnybePqe0x1-5oLOvogA0,4318
autogluon/tabular/models/tabicl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/tabular/models/tabicl/tabicl_model.py,sha256=
+ autogluon/tabular/models/tabicl/tabicl_model.py,sha256=_Eq3g9babdC17kyvAA0rIqtZEtiRGwM2XngkbWevXpU,6283
autogluon/tabular/models/tabm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
autogluon/tabular/models/tabm/_tabm_internal.py,sha256=fRQ-s5PN94kWqf3LRDen7su_fd-d332YKxdms30FoZM,21066
autogluon/tabular/models/tabm/rtdl_num_embeddings.py,sha256=XssNMaUM0E0G8Grzl_VkVsLt2FcMf3I4cplfvQdVum0,30156
- autogluon/tabular/models/tabm/tabm_model.py,sha256=
+ autogluon/tabular/models/tabm/tabm_model.py,sha256=_SGc7R87ug9m8KGd_BgC9maJ7sjOAlYB9vtg1omwOto,13640
autogluon/tabular/models/tabm/tabm_reference.py,sha256=byyP6lcJjA4THbP1VDTgJkj62zyz2S3mEvxWB-kFROw,21944
autogluon/tabular/models/tabpfnmix/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py,sha256=
+ autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py,sha256=NAuV3rJia-UNnFwiFU5tkz6vzZ2lokQ_12vUJ3E6wAA,16498
autogluon/tabular/models/tabpfnmix/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
autogluon/tabular/models/tabpfnmix/_internal/tabpfnmix_classifier.py,sha256=_WIO_YQBUCfprKYLHxUNEICPb5XWZw4zbw00DuiTk_s,3426
autogluon/tabular/models/tabpfnmix/_internal/tabpfnmix_regressor.py,sha256=J6JvrK6L6y3s-Ah6sHQdjSK0mwAMP-Wy3RRBwzB0AoA,3196
@@ -143,7 +143,7 @@ autogluon/tabular/models/tabpfnmix/_internal/models/foundation/foundation_transf
autogluon/tabular/models/tabpfnmix/_internal/results/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
autogluon/tabular/models/tabpfnmix/_internal/results/prediction_metrics.py,sha256=1tRPHyViSSLJ7BkQJi6wai-PwXJ56od86Dy1WWKWZq4,1743
autogluon/tabular/models/tabpfnv2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/tabular/models/tabpfnv2/tabpfnv2_model.py,sha256=
+ autogluon/tabular/models/tabpfnv2/tabpfnv2_model.py,sha256=nXZcq4SMV54dciOKFM57Suc9eVyXQXy-2iN6moRt2b8,14801
autogluon/tabular/models/tabpfnv2/rfpfn/__init__.py,sha256=yE5XAhGxKEFV0JcelZ_JTQZIWGlVEVUQ9a-lxcH_Esc,585
autogluon/tabular/models/tabpfnv2/rfpfn/configs.py,sha256=lzBY9kKOeBZACVrtRDPHF4ATs9g1rxyNnIs2CMjE20c,1175
autogluon/tabular/models/tabpfnv2/rfpfn/scoring_utils.py,sha256=uvHsfvnnMdg4tP3_7zAilktkw7nr65LaqfVKXabXAow,6785
@@ -159,7 +159,7 @@ autogluon/tabular/models/tabular_nn/hyperparameters/__init__.py,sha256=47DEQpj8H
autogluon/tabular/models/tabular_nn/hyperparameters/parameters.py,sha256=kGvfuDZa9wDCCTEeytVLKhOAeR0pCcoVNJcWjketmBI,6375
autogluon/tabular/models/tabular_nn/hyperparameters/searchspaces.py,sha256=pT9cJ3MaWPnaQwAf47Yz6f0-L9qDBknahERbggAp52U,2810
autogluon/tabular/models/tabular_nn/torch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py,sha256=
+ autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py,sha256=TGVMv_ClKh0iYVVCqgd19DE-1fXk_VODpsXIMvzI3Sw,42978
autogluon/tabular/models/tabular_nn/torch/tabular_torch_dataset.py,sha256=RdnQGZSrvY1iuJB4JTANniH3Dorw-DP0Em_JK3_h7RM,13497
autogluon/tabular/models/tabular_nn/torch/torch_network_modules.py,sha256=Qc3PwXTD8A7PgXi6EGuaBCrN3jsFAXDLCW7i6tE5wYI,11338
autogluon/tabular/models/tabular_nn/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -170,7 +170,7 @@ autogluon/tabular/models/text_prediction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5
autogluon/tabular/models/text_prediction/text_prediction_v1_model.py,sha256=PBN7F98qgEAO6U76rV_hxZfAmKr_XpVKjElOdBvfX8c,1090
autogluon/tabular/models/xgboost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
autogluon/tabular/models/xgboost/callbacks.py,sha256=PuRQUg3AEjgvFa-dpstRFoEVM9jHDe5W4XYSdDPRqoE,7009
- autogluon/tabular/models/xgboost/xgboost_model.py,sha256=
+ autogluon/tabular/models/xgboost/xgboost_model.py,sha256=tKVLvBnuTbDaFwBRVDZ5ADo4PjBF2FDR93Ib86WYTMM,15630
autogluon/tabular/models/xgboost/xgboost_utils.py,sha256=FVqZ8h4JAe_pifSvNx83cLZHwsuzTXylrrcan07AoNo,5757
autogluon/tabular/models/xgboost/hyperparameters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
autogluon/tabular/models/xgboost/hyperparameters/parameters.py,sha256=ay6bVVpiPzftbtz6TTS76w7j4vjDjzHFpuf2Bjf6Zu4,1673
@@ -179,12 +179,12 @@ autogluon/tabular/models/xt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
autogluon/tabular/models/xt/xt_model.py,sha256=qOHJ5h1lHI7uYJfbl0BWm-29R3MNp2WeZB9ptcq5Xis,1003
autogluon/tabular/predictor/__init__.py,sha256=zCMgjxQlWpDWnr1l1xjBCiK3rWC3N3RoD8UXBnazT74,107
autogluon/tabular/predictor/interpretable_predictor.py,sha256=5UeKgnMFsfY65tiO3kxfHBPr03lyswLrgdtjPhI0Y7Q,6934
- autogluon/tabular/predictor/predictor.py,sha256=
+ autogluon/tabular/predictor/predictor.py,sha256=fjw7CQALXZ7AR18ryLm4xWwDzRBeUnrmNubPS8U_pmQ,361223
autogluon/tabular/registry/__init__.py,sha256=vZpzX4Xve7bfA9crt5LxjgQv9PPfxbi1E1U6Im0Y_xU,93
autogluon/tabular/registry/_ag_model_registry.py,sha256=2Zx5qxXvOdXIbL1FKslNh2M_JM2YG_7GvsCMFF11wDY,1578
autogluon/tabular/registry/_model_registry.py,sha256=Rl8Q7BLzaif4hxNxJF20xGE02vrWwh2ZuUaTmA-UJnE,6824
autogluon/tabular/testing/__init__.py,sha256=XrEGLmMdmRT6QHNR13M9wna57LO4O3Q4tt27Ca8omAc,79
- autogluon/tabular/testing/fit_helper.py,sha256=
+ autogluon/tabular/testing/fit_helper.py,sha256=pj3P0ENMDhr04laxsLL0_IDX-8msMFo9Wn5XSLFCaqI,21092
autogluon/tabular/testing/generate_datasets.py,sha256=nvcAmI-tOh5fwx_ZTx2aRa1n7CsXb96wbR-xqNy1C5w,3884
autogluon/tabular/testing/model_fit_helper.py,sha256=ZjWpw2nyeFnsrccmkfQtx3qbA8HJx282XX2rwdS-LIs,3808
autogluon/tabular/trainer/__init__.py,sha256=PW_PGL-tWoQzx3ES2S53bQEZOtsRWTYiM9QdOqsk0dI,38
@@ -195,11 +195,11 @@ autogluon/tabular/trainer/model_presets/presets.py,sha256=hoWADaOG576Q_XLV1nY_ju
autogluon/tabular/trainer/model_presets/presets_distill.py,sha256=MnFC2GJc6RmDBNAGbsO2XMfo3PjR8cUrZoilWW8gTYQ,3295
autogluon/tabular/tuning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
autogluon/tabular/tuning/feature_pruner.py,sha256=9iNku8gVbYEkjuKlyITPJDicsNkoraaQOlINQq9iZlQ,6877
-
-
-
-
-
-
-
-
+ autogluon_tabular-1.4.1b20251214.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+ autogluon_tabular-1.4.1b20251214.dist-info/licenses/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+ autogluon_tabular-1.4.1b20251214.dist-info/METADATA,sha256=XbmjT9lmPhMkbhK6fgfIBrv6zMe1EMZ3wvoTx_Waons,17015
+ autogluon_tabular-1.4.1b20251214.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+ autogluon_tabular-1.4.1b20251214.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+ autogluon_tabular-1.4.1b20251214.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+ autogluon_tabular-1.4.1b20251214.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+ autogluon_tabular-1.4.1b20251214.dist-info/RECORD,,
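Each RECORD row has the form path,sha256=<digest>,<size>: the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing '=' padding stripped, the size is in bytes, the bare "sha256=" values on the removed lines appear to be old hashes truncated by the diff view, and the RECORD file itself is listed as "RECORD,," with no hash or size. A minimal sketch of how such a row can be recomputed for verification; the paths in the usage comment are placeholders, not part of this package:

# Recompute a RECORD row: urlsafe-base64 SHA-256 (unpadded) plus size in bytes.
import base64
import hashlib
from pathlib import Path

def record_row(file_path: Path, site_packages: Path) -> str:
    data = file_path.read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{file_path.relative_to(site_packages).as_posix()},sha256={digest},{len(data)}"

# Usage (placeholder paths):
# site = Path("/path/to/site-packages")
# print(record_row(site / "autogluon" / "tabular" / "version.py", site))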
autogluon.tabular-1.4.1b20251014-py3.9-nspkg.pth
@@ -1 +0,0 @@
- import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('autogluon',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('autogluon', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('autogluon', [os.path.dirname(p)])));m = m or sys.modules.setdefault('autogluon', types.ModuleType('autogluon'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
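The removed line above is the setuptools-generated namespace-package bootstrap for the top-level 'autogluon' package. The sketch below unpacks the same one-liner into commented statements for readability; it is illustrative only: a real .pth file must keep the logic on a single physical line, 'sitedir' is injected by site.py when the .pth is executed at startup (here it is a placeholder), and the Python<=3.5 guard (has_mfs) is dropped.

# Expanded, commented rendering of the nspkg.pth one-liner (illustrative sketch).
import importlib.machinery
import importlib.util
import os
import sys
import types

sitedir = "/path/to/site-packages"  # placeholder; supplied by site.py in the real .pth

# Directory that should become part of the 'autogluon' namespace package.
p = os.path.join(sitedir, "autogluon")

# Build the module from a spec located by the path-based finder, if possible.
spec = importlib.machinery.PathFinder.find_spec("autogluon", [os.path.dirname(p)])
m = sys.modules.setdefault("autogluon", importlib.util.module_from_spec(spec)) if spec else None

# Otherwise fall back to a plain module object.
m = m or sys.modules.setdefault("autogluon", types.ModuleType("autogluon"))

# Ensure this site dir's 'autogluon' folder is on the namespace __path__, so
# sibling distributions (autogluon.core, autogluon.tabular, ...) resolve together.
mp = m.__dict__.setdefault("__path__", [])
if p not in mp:
    mp.append(p)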
{autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info/licenses}/LICENSE
RENAMED
File without changes
{autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info/licenses}/NOTICE
RENAMED
File without changes
{autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/namespace_packages.txt
RENAMED
File without changes
{autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/top_level.txt
RENAMED
File without changes
{autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.4.1b20251214.dist-info}/zip-safe
RENAMED
File without changes
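The .dist-info directory rename behind these RENAMED entries, from autogluon.tabular-*.dist-info to autogluon_tabular-*.dist-info, is consistent with the name normalization newer build backends apply when writing wheel metadata (PEP 503 canonicalization, then dashes escaped to underscores). A sketch of that rule, assuming the standard normalization and not AutoGluon-specific code:

# Sketch of dist-info name normalization (assumed standard rule, not AutoGluon code).
import re

def canonicalize_name(name: str) -> str:
    # PEP 503: collapse runs of '-', '_', '.' into a single '-' and lowercase.
    return re.sub(r"[-_.]+", "-", name).lower()

def dist_info_dir(name: str, version: str) -> str:
    # Wheel metadata directory: normalized name with '-' escaped to '_'.
    return f"{canonicalize_name(name).replace('-', '_')}-{version}.dist-info"

print(dist_info_dir("autogluon.tabular", "1.4.1b20251214"))
# -> autogluon_tabular-1.4.1b20251214.dist-info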