autogluon.tabular 1.3.2b20250714__py3-none-any.whl → 1.3.2b20250716__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (35)
  1. autogluon/tabular/models/catboost/catboost_model.py +9 -6
  2. autogluon/tabular/models/catboost/catboost_utils.py +10 -0
  3. autogluon/tabular/models/lgb/lgb_model.py +2 -1
  4. autogluon/tabular/models/mitra/_internal/__init__.py +1 -0
  5. autogluon/tabular/models/mitra/_internal/config/__init__.py +1 -0
  6. autogluon/tabular/models/mitra/_internal/config/config_run.py +3 -3
  7. autogluon/tabular/models/mitra/_internal/config/enums.py +20 -3
  8. autogluon/tabular/models/mitra/_internal/core/__init__.py +1 -0
  9. autogluon/tabular/models/mitra/_internal/core/get_loss.py +22 -23
  10. autogluon/tabular/models/mitra/_internal/core/prediction_metrics.py +11 -13
  11. autogluon/tabular/models/mitra/_internal/core/trainer_finetune.py +69 -75
  12. autogluon/tabular/models/mitra/_internal/data/__init__.py +1 -0
  13. autogluon/tabular/models/mitra/_internal/data/preprocessor.py +57 -57
  14. autogluon/tabular/models/mitra/_internal/models/__init__.py +1 -0
  15. autogluon/tabular/models/mitra/_internal/models/tab2d.py +23 -26
  16. autogluon/tabular/models/mitra/_internal/utils/__init__.py +1 -0
  17. autogluon/tabular/models/mitra/mitra_model.py +64 -24
  18. autogluon/tabular/models/mitra/sklearn_interface.py +52 -42
  19. autogluon/tabular/models/realmlp/realmlp_model.py +11 -3
  20. autogluon/tabular/models/tabicl/tabicl_model.py +4 -1
  21. autogluon/tabular/models/tabm/_tabm_internal.py +4 -3
  22. autogluon/tabular/models/tabm/tabm_model.py +7 -3
  23. autogluon/tabular/models/tabm/tabm_reference.py +21 -19
  24. autogluon/tabular/models/tabpfnv2/tabpfnv2_model.py +10 -9
  25. autogluon/tabular/testing/fit_helper.py +2 -2
  26. autogluon/tabular/version.py +1 -1
  27. {autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/METADATA +11 -11
  28. {autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/RECORD +35 -29
  29. /autogluon.tabular-1.3.2b20250714-py3.9-nspkg.pth → /autogluon.tabular-1.3.2b20250716-py3.9-nspkg.pth +0 -0
  30. {autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/LICENSE +0 -0
  31. {autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/NOTICE +0 -0
  32. {autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/WHEEL +0 -0
  33. {autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/namespace_packages.txt +0 -0
  34. {autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/top_level.txt +0 -0
  35. {autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/zip-safe +0 -0
autogluon/tabular/models/mitra/sklearn_interface.py

@@ -1,20 +1,20 @@
-import numpy as np
 import time
-import torch
-import pandas as pd
-
 from pathlib import Path
+
+import numpy as np
+import pandas as pd
+import torch
 from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
 
-from ._internal.data.dataset_split import make_stratified_dataset_split
 from ._internal.config.config_run import ConfigRun
+from ._internal.config.enums import ModelName
 from ._internal.core.trainer_finetune import TrainerFinetune
+from ._internal.data.dataset_split import make_stratified_dataset_split
 from ._internal.models.tab2d import Tab2D
-from ._internal.config.enums import ModelName
 
 # Hyperparameter search space
 DEFAULT_FINE_TUNE = True # [True, False]
-DEFAULT_FINE_TUNE_STEPS = 50 # [50, 60, 70, 80, 90, 100]
+DEFAULT_FINE_TUNE_STEPS = 50 # [50, 60, 70, 80, 90, 100]
 DEFAULT_CLS_METRIC = 'log_loss' # ['log_loss', 'accuracy', 'auc']
 DEFAULT_REG_METRIC = 'mse' # ['mse', 'mae', 'rmse', 'r2']
 SHUFFLE_CLASSES = False # [True, False]
@@ -32,7 +32,17 @@ DEFAULT_REG_MODEL = 'autogluon/mitra-regressor'
 # Constants
 SEED = 0
 DEFAULT_MODEL_TYPE = "Tab2D"
-DEFAULT_DEVICE = "cuda"
+
+def _get_default_device():
+    """Get the best available device for the current system."""
+    if torch.cuda.is_available():
+        return "cuda"
+    elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+        return "mps" # Apple silicon
+    else:
+        return "cpu"
+
+DEFAULT_DEVICE = _get_default_device()
 DEFAULT_ENSEMBLE = 1
 DEFAULT_DIM = 512
 DEFAULT_LAYERS = 12
@@ -43,13 +53,13 @@ USE_HF = True # Use Hugging Face pretrained models if available
 
 class MitraBase(BaseEstimator):
     """Base class for Mitra models with common functionality."""
-
-    def __init__(self,
-                 model_type=DEFAULT_MODEL_TYPE,
-                 n_estimators=DEFAULT_ENSEMBLE,
-                 device=DEFAULT_DEVICE,
+
+    def __init__(self,
+                 model_type=DEFAULT_MODEL_TYPE,
+                 n_estimators=DEFAULT_ENSEMBLE,
+                 device=DEFAULT_DEVICE,
                  fine_tune=DEFAULT_FINE_TUNE,
-                 fine_tune_steps=DEFAULT_FINE_TUNE_STEPS,
+                 fine_tune_steps=DEFAULT_FINE_TUNE_STEPS,
                  metric=DEFAULT_CLS_METRIC,
                  state_dict=None,
                  hf_general_model=DEFAULT_GENERAL_MODEL,
@@ -155,7 +165,7 @@ class MitraBase(BaseEstimator):
 
         return cfg, Tab2D
 
-
+
     def _split_data(self, X, y):
         """Split data into training and validation sets."""
         if hasattr(self, 'task') and self.task == 'classification':
@@ -165,7 +175,7 @@ class MitraBase(BaseEstimator):
             val_indices = np.random.choice(range(len(X)), int(DEFAULT_VALIDATION_SPLIT * len(X)), replace=False).tolist()
             train_indices = [i for i in range(len(X)) if i not in val_indices]
             return X[train_indices], X[val_indices], y[train_indices], y[val_indices]
-
+
     def _train_ensemble(self, X_train, y_train, X_valid, y_valid, task, dim_output, n_classes=0, time_limit=None):
         """Train the ensemble of models."""
 
@@ -175,7 +185,7 @@ class MitraBase(BaseEstimator):
         while not (success and cfg.hyperparams["max_samples_support"] > 0 and cfg.hyperparams["max_samples_query"] > 0):
             try:
                 self.trainers.clear()
-
+
                 self.train_time = 0
                 for _ in range(self.n_estimators):
                     if USE_HF:
@@ -212,7 +222,7 @@ class MitraBase(BaseEstimator):
 
                     self.trainers.append(trainer)
                     self.train_time += end_time - start_time
-
+
                 success = True
 
             except torch.cuda.OutOfMemoryError:
@@ -233,24 +243,24 @@ class MitraBase(BaseEstimator):
                     )
                     print(f"Reducing max_samples_query from {cfg.hyperparams['max_samples_query'] * 2}"
                           f"to {cfg.hyperparams['max_samples_query']} due to OOM error.")
-
+
         if not success:
             raise RuntimeError(
-                f"Failed to train Mitra model after multiple attempts due to out of memory error."
+                "Failed to train Mitra model after multiple attempts due to out of memory error."
            )
-
+
         return self
 
 
 class MitraClassifier(MitraBase, ClassifierMixin):
     """Classifier implementation of Mitra model."""
 
-    def __init__(self,
-                 model_type=DEFAULT_MODEL_TYPE,
-                 n_estimators=DEFAULT_ENSEMBLE,
-                 device=DEFAULT_DEVICE,
+    def __init__(self,
+                 model_type=DEFAULT_MODEL_TYPE,
+                 n_estimators=DEFAULT_ENSEMBLE,
+                 device=DEFAULT_DEVICE,
                  fine_tune=DEFAULT_FINE_TUNE,
-                 fine_tune_steps=DEFAULT_FINE_TUNE_STEPS,
+                 fine_tune_steps=DEFAULT_FINE_TUNE_STEPS,
                  metric=DEFAULT_CLS_METRIC,
                  state_dict=None,
                  patience=PATIENCE,
@@ -265,12 +275,12 @@ class MitraClassifier(MitraBase, ClassifierMixin):
                 ):
         """Initialize the classifier."""
         super().__init__(
-            model_type,
-            n_estimators,
-            device,
+            model_type,
+            n_estimators,
+            device,
             fine_tune,
             fine_tune_steps,
-            metric,
+            metric,
             state_dict,
             patience=patience,
             lr=lr,
@@ -283,7 +293,7 @@ class MitraClassifier(MitraBase, ClassifierMixin):
             seed=seed,
         )
         self.task = 'classification'
-
+
     def fit(self, X, y, X_val = None, y_val = None, time_limit = None):
         """
         Fit the ensemble of models.
@@ -367,12 +377,12 @@ class MitraClassifier(MitraBase, ClassifierMixin):
 class MitraRegressor(MitraBase, RegressorMixin):
     """Regressor implementation of Mitra model."""
 
-    def __init__(self,
-                 model_type=DEFAULT_MODEL_TYPE,
-                 n_estimators=DEFAULT_ENSEMBLE,
-                 device=DEFAULT_DEVICE,
+    def __init__(self,
+                 model_type=DEFAULT_MODEL_TYPE,
+                 n_estimators=DEFAULT_ENSEMBLE,
+                 device=DEFAULT_DEVICE,
                  fine_tune=DEFAULT_FINE_TUNE,
-                 fine_tune_steps=DEFAULT_FINE_TUNE_STEPS,
+                 fine_tune_steps=DEFAULT_FINE_TUNE_STEPS,
                  metric=DEFAULT_REG_METRIC,
                  state_dict=None,
                  patience=PATIENCE,
@@ -387,12 +397,12 @@ class MitraRegressor(MitraBase, RegressorMixin):
                 ):
         """Initialize the regressor."""
         super().__init__(
-            model_type,
-            n_estimators,
-            device,
+            model_type,
+            n_estimators,
+            device,
             fine_tune,
             fine_tune_steps,
-            metric,
+            metric,
             state_dict,
             patience=patience,
             lr=lr,
@@ -457,6 +467,6 @@ class MitraRegressor(MitraBase, RegressorMixin):
         """
         if isinstance(X, pd.DataFrame):
             X = X.values
-
+
         preds = [trainer.predict(self.X, self.y, X) for trainer in self.trainers]
-        return sum(preds) / len(preds) # Averaging ensemble predictions
+        return sum(preds) / len(preds) # Averaging ensemble predictions
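
The hunks above replace Mitra's hard-coded DEFAULT_DEVICE = "cuda" with runtime device detection. For reference, a minimal standalone sketch of the same cuda -> mps -> cpu fallback; it assumes only that torch is installed, and pick_device is an illustrative name rather than part of the package:

import torch

def pick_device() -> str:
    """Prefer CUDA, then Apple-silicon MPS, then fall back to CPU."""
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        return "mps"
    return "cpu"

print(pick_device())  # "cuda" on a CUDA machine, "mps" on Apple silicon, otherwise "cpu"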
autogluon/tabular/models/realmlp/realmlp_model.py

@@ -55,7 +55,12 @@ class RealMLPModel(AbstractModel):
         self._bool_to_cat = None
 
     def get_model_cls(self, default_hyperparameters: Literal["td", "td_s"] = "td"):
-        from pytabkit import RealMLP_TD_Classifier, RealMLP_TD_Regressor, RealMLP_TD_S_Classifier, RealMLP_TD_S_Regressor
+        from pytabkit import (
+            RealMLP_TD_Classifier,
+            RealMLP_TD_Regressor,
+            RealMLP_TD_S_Classifier,
+            RealMLP_TD_S_Regressor,
+        )
 
         assert default_hyperparameters in ["td", "td_s"]
         if self.problem_type in ['binary', 'multiclass']:
@@ -267,9 +272,11 @@ class RealMLPModel(AbstractModel):
         return self.eval_metric
 
     def _get_default_resources(self) -> tuple[int, int]:
-        # only_physical_cores=True is faster in training
+        # Use only physical cores for better performance based on benchmarks
         num_cpus = ResourceManager.get_cpu_count(only_physical_cores=True)
-        num_gpus = min(ResourceManager.get_gpu_count_torch(), 1)
+        # Only request GPU if CUDA is available (RealMLP doesn't support MPS)
+        import torch
+        num_gpus = 1 if torch.cuda.is_available() else 0
         return num_cpus, num_gpus
 
     def _estimate_memory_usage(self, X: pd.DataFrame, **kwargs) -> int:
@@ -345,3 +352,4 @@ class RealMLPModel(AbstractModel):
         # How to mirror RealMLP learning rate scheduler while forcing stopping at a specific epoch?
         tags = {"can_refit_full": False}
         return tags
+        return tags
autogluon/tabular/models/tabicl/tabicl_model.py

@@ -109,8 +109,11 @@ class TabICLModel(AbstractModel):
         return ["binary", "multiclass"]
 
     def _get_default_resources(self) -> tuple[int, int]:
+        # Use only physical cores for better performance based on benchmarks
         num_cpus = ResourceManager.get_cpu_count(only_physical_cores=True)
-        num_gpus = min(ResourceManager.get_gpu_count_torch(), 1)
+        # Only request GPU if CUDA is available (TabICL doesn't support MPS)
+        import torch
+        num_gpus = 1 if torch.cuda.is_available() else 0
         return num_cpus, num_gpus
 
     def _estimate_memory_usage(self, X: pd.DataFrame, **kwargs) -> int:
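
Several model wrappers in this diff (RealMLP above, TabICL here, TabM and TabPFNv2 below) change their default resource request from counting visible GPUs to requesting a GPU only when CUDA is present. A hedged sketch of that pattern, assuming torch and autogluon.common are installed; default_resources is an illustrative name, not an AutoGluon API:

import torch
from autogluon.common.utils.resource_utils import ResourceManager

def default_resources() -> tuple[int, int]:
    # Count physical cores only; hyperthreads tend not to help these models during training.
    num_cpus = ResourceManager.get_cpu_count(only_physical_cores=True)
    # Request a single GPU only when CUDA is available (other accelerators such as MPS are not supported).
    num_gpus = 1 if torch.cuda.is_available() else 0
    return num_cpus, num_gpus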
autogluon/tabular/models/tabm/_tabm_internal.py

@@ -12,13 +12,14 @@ import numpy as np
 import pandas as pd
 import scipy
 import torch
-from autogluon.core.metrics import compute_metric
 from sklearn.base import BaseEstimator, TransformerMixin
 from sklearn.impute import SimpleImputer
 from sklearn.pipeline import Pipeline
 from sklearn.preprocessing import OrdinalEncoder, QuantileTransformer
 from sklearn.utils.validation import check_is_fitted
 
+from autogluon.core.metrics import compute_metric
+
 from . import rtdl_num_embeddings, tabm_reference
 from .tabm_reference import make_parameter_groups
 
@@ -468,7 +469,7 @@ class TabMImplementation:
                     best = {"val": val_score, "epoch": epoch}
                     remaining_patience = patience
                     with torch.no_grad():
-                        for bp, p in zip(best_params, model.parameters(), strict=False):
+                        for bp, p in zip(best_params, model.parameters()):
                            bp.copy_(p)
                 else:
                     remaining_patience -= 1
@@ -481,7 +482,7 @@ class TabMImplementation:
 
         logger.log(15, "Restoring best model")
         with torch.no_grad():
-            for bp, p in zip(best_params, model.parameters(), strict=False):
+            for bp, p in zip(best_params, model.parameters()):
                p.copy_(bp)
 
         self.model_ = model
autogluon/tabular/models/tabm/tabm_model.py

@@ -19,6 +19,7 @@ import logging
 import time
 
 import pandas as pd
+
 from autogluon.common.utils.resource_utils import ResourceManager
 from autogluon.core.models import AbstractModel
 from autogluon.tabular import __version__
@@ -55,8 +56,9 @@ class TabMModel(AbstractModel):
 
         try:
             # imports various dependencies such as torch
-            from ._tabm_internal import TabMImplementation
             from torch.cuda import is_available
+
+            from ._tabm_internal import TabMImplementation
         except ImportError as err:
             logger.log(
                 40,
@@ -146,9 +148,11 @@ class TabMModel(AbstractModel):
         return self.eval_metric
 
     def _get_default_resources(self) -> tuple[int, int]:
-        # only_physical_cores=True is faster in training
+        # Use only physical cores for better performance based on benchmarks
         num_cpus = ResourceManager.get_cpu_count(only_physical_cores=True)
-        num_gpus = min(ResourceManager.get_gpu_count_torch(), 1)
+        # Only request GPU if CUDA is available (TabM doesn't support other accelerators such as MPS)
+        import torch
+        num_gpus = 1 if torch.cuda.is_available() else 0
         return num_cpus, num_gpus
 
     def _estimate_memory_usage(self, X: pd.DataFrame, **kwargs) -> int:
autogluon/tabular/models/tabm/tabm_reference.py

@@ -4,7 +4,7 @@
 # The minimum required versions of the dependencies are specified in README.md.
 
 import itertools
-from typing import Any, Literal
+from typing import Any, Literal, Union
 
 import torch
 import torch.nn as nn
@@ -159,9 +159,9 @@ class LinearEfficientEnsemble(nn.Module):
     avoids the term "adapter".
     """
 
-    r: None | Tensor
-    s: None | Tensor
-    bias: None | Tensor
+    r: Union[None, Tensor]
+    s: Union[None, Tensor]
+    bias: Union[None, Tensor]
 
     def __init__(
         self,
@@ -259,8 +259,8 @@ class MLP(nn.Module):
     def __init__(
         self,
         *,
-        d_in: None | int = None,
-        d_out: None | int = None,
+        d_in: Union[None, int] = None,
+        d_out: Union[None, int] = None,
         n_blocks: int,
         d_block: int,
         dropout: float,
@@ -386,19 +386,21 @@ def default_zero_weight_decay_condition(
     del module_name, parameter
     return parameter_name.endswith('bias') or isinstance(
         module,
-        nn.BatchNorm1d
-        | nn.LayerNorm
-        | nn.InstanceNorm1d
-        | rtdl_num_embeddings.LinearEmbeddings
-        | rtdl_num_embeddings.LinearReLUEmbeddings
-        | _Periodic,
+        (
+            nn.BatchNorm1d,
+            nn.LayerNorm,
+            nn.InstanceNorm1d,
+            rtdl_num_embeddings.LinearEmbeddings,
+            rtdl_num_embeddings.LinearReLUEmbeddings,
+            _Periodic,
+        ),
     )
 
 
 def make_parameter_groups(
     module: nn.Module,
     zero_weight_decay_condition=default_zero_weight_decay_condition,
-    custom_groups: None | list[dict[str, Any]] = None,
+    custom_groups: Union[None, list[dict[str, Any]]] = None,
 ) -> list[dict[str, Any]]:
     if custom_groups is None:
         custom_groups = []
@@ -439,10 +441,10 @@ class Model(nn.Module):
         *,
         n_num_features: int,
         cat_cardinalities: list[int],
-        n_classes: None | int,
+        n_classes: Union[None, int],
         backbone: dict,
-        bins: None | list[Tensor], # For piecewise-linear encoding/embeddings.
-        num_embeddings: None | dict = None,
+        bins: Union[None, list[Tensor]], # For piecewise-linear encoding/embeddings.
+        num_embeddings: Union[None, dict] = None,
         arch_type: Literal[
             # Plain feed-forward network without any kind of ensembling.
             'plain',
@@ -464,7 +466,7 @@ class Model(nn.Module):
             # This variant was not used in the paper.
             'tabm-mini-normal',
         ],
-        k: None | int = None,
+        k: Union[None, int] = None,
         share_training_batches: bool = True,
     ) -> None:
         # >>> Validate arguments.
@@ -593,7 +595,7 @@ class Model(nn.Module):
         self.share_training_batches = share_training_batches
 
     def forward(
-        self, x_num: None | Tensor = None, x_cat: None | Tensor = None
+        self, x_num: Union[None, Tensor] = None, x_cat: Union[None, Tensor] = None
     ) -> Tensor:
         x = []
         if x_num is not None:
@@ -624,4 +626,4 @@ class Model(nn.Module):
         # with the rest of the script (loss, metrics, predictions, ...).
         # (B, D_OUT) -> (B, 1, D_OUT)
         x = x[:, None]
-        return x
+        return x
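
The tabm_reference.py hunks above replace PEP 604 syntax (X | Y in annotations and in isinstance checks) with typing.Union and tuples of classes, spellings that also run on Python 3.9. A small illustrative sketch of the equivalence; maybe_scale is a made-up example name, not from the package:

from typing import Union

def maybe_scale(x: float, factor: Union[None, float] = None) -> float:
    # Union[None, float] is the same type as Optional[float]; the "None | float"
    # form is only valid as a runtime expression on Python >= 3.10.
    return x if factor is None else x * factor

# isinstance accepts a tuple of types on every supported Python version;
# isinstance(x, int | float) requires Python >= 3.10.
assert isinstance(maybe_scale(2.0, 3.0), (int, float))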
autogluon/tabular/models/tabpfnv2/tabpfnv2_model.py

@@ -16,11 +16,12 @@ from typing import TYPE_CHECKING, Any
 
 import numpy as np
 import scipy
+from sklearn.preprocessing import PowerTransformer
+
 from autogluon.common.utils.resource_utils import ResourceManager
 from autogluon.core.models import AbstractModel
 from autogluon.features.generators import LabelEncoderFeatureGenerator
 from autogluon.tabular import __version__
-from sklearn.preprocessing import PowerTransformer
 
 if TYPE_CHECKING:
     import pandas as pd
@@ -243,10 +244,7 @@ class TabPFNV2Model(AbstractModel):
         n_ensemble_repeats = hps.pop("n_ensemble_repeats", None)
         model_is_rf_pfn = hps.pop("model_type", "no") == "dt_pfn"
         if model_is_rf_pfn:
-            from .rfpfn import (
-                RandomForestTabPFNClassifier,
-                RandomForestTabPFNRegressor,
-            )
+            from .rfpfn import RandomForestTabPFNClassifier, RandomForestTabPFNRegressor
 
             hps["n_estimators"] = 1
             rf_model_base = (
@@ -272,18 +270,21 @@ class TabPFNV2Model(AbstractModel):
     def _log_license(self, device: str):
         global _HAS_LOGGED_TABPFN_LICENSE
         if not _HAS_LOGGED_TABPFN_LICENSE:
-            logger.log(20, f"\tBuilt with PriorLabs-TabPFN") # Aligning with TabPFNv2 license requirements
+            logger.log(20, "\tBuilt with PriorLabs-TabPFN") # Aligning with TabPFNv2 license requirements
             if device == "cpu":
                 logger.log(
                     20,
-                    f"\tRunning TabPFNv2 on CPU. This can be very slow. "
-                    f"It is recommended to run TabPFNv2 on a GPU."
+                    "\tRunning TabPFNv2 on CPU. This can be very slow. "
+                    "It is recommended to run TabPFNv2 on a GPU."
                 )
             _HAS_LOGGED_TABPFN_LICENSE = True # Avoid repeated logging
 
     def _get_default_resources(self) -> tuple[int, int]:
+        # Use only physical cores for better performance based on benchmarks
         num_cpus = ResourceManager.get_cpu_count(only_physical_cores=True)
-        num_gpus = min(ResourceManager.get_gpu_count_torch(), 1)
+        # Only request GPU if CUDA is available (TabPFNV2 doesn't support other accelerators such as MPS)
+        import torch
+        num_gpus = 1 if torch.cuda.is_available() else 0
         return num_cpus, num_gpus
 
     def _set_default_params(self):
autogluon/tabular/testing/fit_helper.py

@@ -441,9 +441,9 @@ class FitHelper:
             num_bag_sets=1,
         )
         if isinstance(bag, bool):
-            problem_types_bag = supported_problem_types
+            problem_types_bag = problem_types_to_check
         elif bag == "first":
-            problem_types_bag = supported_problem_types[:1]
+            problem_types_bag = problem_types_to_check[:1]
         else:
             raise ValueError(f"Unknown 'bag' value: {bag}")
 
autogluon/tabular/version.py

@@ -1,4 +1,4 @@
 """This is the autogluon version file."""
 
-__version__ = "1.3.2b20250714"
+__version__ = "1.3.2b20250716"
 __lite__ = False
{autogluon.tabular-1.3.2b20250714.dist-info → autogluon.tabular-1.3.2b20250716.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.tabular
-Version: 1.3.2b20250714
+Version: 1.3.2b20250716
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -41,20 +41,20 @@ Requires-Dist: scipy<1.17,>=1.5.4
 Requires-Dist: pandas<2.4.0,>=2.0.0
 Requires-Dist: scikit-learn<1.8.0,>=1.4.0
 Requires-Dist: networkx<4,>=3.0
-Requires-Dist: autogluon.core==1.3.2b20250714
-Requires-Dist: autogluon.features==1.3.2b20250714
+Requires-Dist: autogluon.core==1.3.2b20250716
+Requires-Dist: autogluon.features==1.3.2b20250716
 Provides-Extra: all
-Requires-Dist: spacy<3.9; extra == "all"
+Requires-Dist: lightgbm<4.7,>=4.0; extra == "all"
+Requires-Dist: autogluon.core[all]==1.3.2b20250716; extra == "all"
+Requires-Dist: pytabkit<1.6,>=1.5; extra == "all"
 Requires-Dist: huggingface-hub[torch]; extra == "all"
-Requires-Dist: fastai<2.9,>=2.3.1; extra == "all"
+Requires-Dist: torch<2.8,>=2.2; extra == "all"
+Requires-Dist: spacy<3.9; extra == "all"
 Requires-Dist: numpy<2.3.0,>=1.25; extra == "all"
-Requires-Dist: pytabkit<1.6,>=1.5; extra == "all"
-Requires-Dist: einops<0.9,>=0.7; extra == "all"
-Requires-Dist: autogluon.core[all]==1.3.2b20250714; extra == "all"
 Requires-Dist: xgboost<3.1,>=2.0; extra == "all"
-Requires-Dist: lightgbm<4.7,>=4.0; extra == "all"
-Requires-Dist: torch<2.8,>=2.2; extra == "all"
 Requires-Dist: catboost<1.3,>=1.2; extra == "all"
+Requires-Dist: einops<0.9,>=0.7; extra == "all"
+Requires-Dist: fastai<2.9,>=2.3.1; extra == "all"
 Provides-Extra: catboost
 Requires-Dist: numpy<2.3.0,>=1.25; extra == "catboost"
 Requires-Dist: catboost<1.3,>=1.2; extra == "catboost"
@@ -72,7 +72,7 @@ Requires-Dist: einx; extra == "mitra"
 Requires-Dist: omegaconf; extra == "mitra"
 Requires-Dist: transformers; extra == "mitra"
 Provides-Extra: ray
-Requires-Dist: autogluon.core[all]==1.3.2b20250714; extra == "ray"
+Requires-Dist: autogluon.core[all]==1.3.2b20250716; extra == "ray"
 Provides-Extra: realmlp
 Requires-Dist: pytabkit<1.6,>=1.5; extra == "realmlp"
 Provides-Extra: skex