autogluon.tabular 1.4.1b20251014__py3-none-any.whl → 1.5.0b20251222__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. autogluon/tabular/configs/hyperparameter_configs.py +4 -0
  2. autogluon/tabular/configs/presets_configs.py +39 -2
  3. autogluon/tabular/configs/zeroshot/zeroshot_portfolio_2025.py +2 -44
  4. autogluon/tabular/configs/zeroshot/zeroshot_portfolio_cpu_2025_12_18.py +2 -0
  5. autogluon/tabular/configs/zeroshot/zeroshot_portfolio_gpu_2025_12_18.py +2 -0
  6. autogluon/tabular/learner/default_learner.py +1 -0
  7. autogluon/tabular/models/__init__.py +3 -1
  8. autogluon/tabular/models/abstract/__init__.py +0 -0
  9. autogluon/tabular/models/abstract/abstract_torch_model.py +148 -0
  10. autogluon/tabular/models/catboost/catboost_model.py +2 -5
  11. autogluon/tabular/models/ebm/ebm_model.py +2 -6
  12. autogluon/tabular/models/fastainn/tabular_nn_fastai.py +9 -3
  13. autogluon/tabular/models/lgb/lgb_model.py +60 -17
  14. autogluon/tabular/models/lgb/lgb_utils.py +2 -2
  15. autogluon/tabular/models/lr/lr_model.py +2 -4
  16. autogluon/tabular/models/lr/lr_preprocessing_utils.py +6 -7
  17. autogluon/tabular/models/mitra/_internal/core/trainer_finetune.py +14 -1
  18. autogluon/tabular/models/mitra/mitra_model.py +55 -29
  19. autogluon/tabular/models/realmlp/realmlp_model.py +8 -5
  20. autogluon/tabular/models/rf/rf_model.py +6 -8
  21. autogluon/tabular/models/tabdpt/__init__.py +0 -0
  22. autogluon/tabular/models/tabdpt/tabdpt_model.py +253 -0
  23. autogluon/tabular/models/tabicl/tabicl_model.py +15 -5
  24. autogluon/tabular/models/tabm/tabm_model.py +25 -8
  25. autogluon/tabular/models/tabpfnmix/tabpfnmix_model.py +7 -5
  26. autogluon/tabular/models/tabpfnv2/tabpfnv2_5_model.py +451 -0
  27. autogluon/tabular/models/tabpfnv2/tabpfnv2_model.py +87 -12
  28. autogluon/tabular/models/tabprep/__init__.py +0 -0
  29. autogluon/tabular/models/tabprep/prep_lgb_model.py +21 -0
  30. autogluon/tabular/models/tabprep/prep_mixin.py +220 -0
  31. autogluon/tabular/models/tabular_nn/torch/tabular_nn_torch.py +3 -6
  32. autogluon/tabular/models/tabular_nn/utils/data_preprocessor.py +12 -4
  33. autogluon/tabular/models/xgboost/xgboost_model.py +3 -4
  34. autogluon/tabular/predictor/predictor.py +50 -20
  35. autogluon/tabular/registry/_ag_model_registry.py +8 -2
  36. autogluon/tabular/testing/fit_helper.py +61 -0
  37. autogluon/tabular/trainer/abstract_trainer.py +45 -9
  38. autogluon/tabular/trainer/auto_trainer.py +5 -0
  39. autogluon/tabular/version.py +1 -1
  40. autogluon.tabular-1.5.0b20251222-py3.11-nspkg.pth +1 -0
  41. {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info}/METADATA +97 -87
  42. {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info}/RECORD +48 -38
  43. {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info}/WHEEL +1 -1
  44. autogluon.tabular-1.4.1b20251014-py3.9-nspkg.pth +0 -1
  45. {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info/licenses}/LICENSE +0 -0
  46. {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info/licenses}/NOTICE +0 -0
  47. {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info}/namespace_packages.txt +0 -0
  48. {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info}/top_level.txt +0 -0
  49. {autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info}/zip-safe +0 -0
autogluon/tabular/trainer/abstract_trainer.py

@@ -27,7 +27,7 @@ from autogluon.core.calibrate.conformity_score import compute_conformity_score
  from autogluon.core.calibrate.temperature_scaling import apply_temperature_scaling, tune_temperature_scaling
  from autogluon.core.callbacks import AbstractCallback
  from autogluon.core.constants import BINARY, MULTICLASS, QUANTILE, REFIT_FULL_NAME, REGRESSION, SOFTCLASS
- from autogluon.core.data.label_cleaner import LabelCleanerMulticlassToBinary
+ from autogluon.core.data.label_cleaner import LabelCleanerMulticlassToBinary, LabelCleaner
  from autogluon.core.metrics import Scorer, compute_metric, get_metric
  from autogluon.core.models import (
  AbstractModel,
@@ -530,7 +530,7 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
  self.save()
  return model_names_fit

- def _fit_setup(self, time_limit: float | None = None, callbacks: list[AbstractCallback] | None = None):
+ def _fit_setup(self, time_limit: float | None = None, callbacks: list[AbstractCallback | list | tuple] | None = None):
  """
  Prepare the trainer state at the start of / prior to a fit call.
  Should be paired with a `self._fit_cleanup()` at the conclusion of the fit call.
@@ -539,15 +539,45 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
  self._time_train_start_last = self._time_train_start
  self._time_limit = time_limit
  self.reset_callbacks()
+ callbacks_new = []
  if callbacks is not None:
  assert isinstance(callbacks, list), f"`callbacks` must be a list. Found invalid type: `{type(callbacks)}`."
  for callback in callbacks:
- assert isinstance(
- callback, AbstractCallback
- ), f"Elements in `callbacks` must be of type AbstractCallback. Found invalid type: `{type(callback)}`."
+ if isinstance(callback, (list, tuple)):
+ assert len(callback) == 2, f"Callback must either be an initialized object or a tuple/list of length 2, found: {callback}"
+ callback_cls = callback[0]
+ if isinstance(callback_cls, str):
+ from autogluon.core.callbacks._early_stopping_count_callback import EarlyStoppingCountCallback
+ from autogluon.core.callbacks._early_stopping_callback import EarlyStoppingCallback
+ from autogluon.core.callbacks._early_stopping_ensemble_callback import EarlyStoppingEnsembleCallback
+
+ _callback_cls_lst = [
+ EarlyStoppingCallback,
+ EarlyStoppingCountCallback,
+ EarlyStoppingEnsembleCallback,
+ ]
+
+ _callback_cls_name_map = {
+ c.__name__: c for c in _callback_cls_lst
+ }
+
+ assert callback_cls in _callback_cls_name_map.keys(), (
+ f"Unknown callback class: {callback_cls}. "
+ f"Valid classes: {list(_callback_cls_name_map.keys())}"
+ )
+ callback_cls = _callback_cls_name_map[callback_cls]
+
+ callback_kwargs = callback[1]
+ assert isinstance(callback_kwargs, dict), f"Callback kwargs must be a dictionary, found: {callback_kwargs}"
+ callback = callback_cls(**callback_kwargs)
+ else:
+ assert isinstance(
+ callback, AbstractCallback
+ ), f"Elements in `callbacks` must be of type AbstractCallback. Found invalid type: `{type(callback)}`."
+ callbacks_new.append(callback)
  else:
- callbacks = []
- self.callbacks = callbacks
+ callbacks_new = []
+ self.callbacks = callbacks_new

  def _fit_cleanup(self):
  """
@@ -2493,6 +2523,7 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
  errors_ignore: list | None = None,
  errors_raise: list | None = None,
  is_ray_worker: bool = False,
+ label_cleaner: None | LabelCleaner = None,
  **kwargs,
  ) -> list[str]:
  """
@@ -2527,7 +2558,8 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
  return []

  model_fit_kwargs = self._get_model_fit_kwargs(
- X=X, X_val=X_val, time_limit=time_limit, k_fold=k_fold, fit_kwargs=fit_kwargs, ens_sample_weight=kwargs.get("ens_sample_weight", None)
+ X=X, X_val=X_val, time_limit=time_limit, k_fold=k_fold, fit_kwargs=fit_kwargs,
+ ens_sample_weight=kwargs.get("ens_sample_weight", None), label_cleaner=label_cleaner,
  )
  exception = None
  if hyperparameter_tune_kwargs:
@@ -4294,7 +4326,8 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
  return distilled_model_names

  def _get_model_fit_kwargs(
- self, X: pd.DataFrame, X_val: pd.DataFrame, time_limit: float, k_fold: int, fit_kwargs: dict, ens_sample_weight: list | None = None
+ self, X: pd.DataFrame, X_val: pd.DataFrame, time_limit: float, k_fold: int,
+ fit_kwargs: dict, ens_sample_weight: list | None = None, label_cleaner: None | LabelCleaner = None
  ) -> dict:
  # Returns kwargs to be passed to AbstractModel's fit function
  if fit_kwargs is None:
@@ -4316,6 +4349,9 @@ class AbstractTabularTrainer(AbstractTrainer[AbstractModel]):
  if k_fold == self.k_fold:  # don't do this on refit full
  model_fit_kwargs["groups"] = self._groups

+ if label_cleaner is not None:
+ model_fit_kwargs["label_cleaner"] = label_cleaner
+
  # FIXME: Sample weight `extract_column` is a hack, have to compute feature_metadata here because sample weight column could be in X upstream, extract sample weight column upstream instead.
  if "feature_metadata" not in model_fit_kwargs:
  raise AssertionError(f"Missing expected parameter 'feature_metadata'.")
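
The remaining hunks in this file thread a fitted `LabelCleaner` from the learner down into each model's fit kwargs. A rough sketch of what such an object does, assuming `LabelCleaner.construct` keeps its usual autogluon.core signature (the values shown are illustrative):

    import pandas as pd
    from autogluon.core.constants import BINARY
    from autogluon.core.data.label_cleaner import LabelCleaner

    y = pd.Series(["yes", "no", "yes", "no"])
    label_cleaner = LabelCleaner.construct(problem_type=BINARY, y=y)  # maps original labels to internal labels
    y_clean = label_cleaner.transform(y)               # e.g. the 0/1 encoding used during training
    y_back = label_cleaner.inverse_transform(y_clean)  # recovers the original "yes"/"no" labels

    # With this change, the trainer can forward the fitted object to each model:
    # model_fit_kwargs["label_cleaner"] = label_cleaner
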
autogluon/tabular/trainer/auto_trainer.py

@@ -59,6 +59,7 @@ class AutoTrainer(AbstractTabularTrainer):
  use_bag_holdout=False,
  groups=None,
  callbacks: list[callable] = None,
+ label_cleaner=None,
  **kwargs,
  ):
  for key in kwargs:
@@ -112,6 +113,7 @@ class AutoTrainer(AbstractTabularTrainer):
  extra_log_str = ""
  display_all = (n_configs < 20) or (self.verbosity >= 3)
  if not display_all:
+ # FIXME: This isn't correct
  extra_log_str = (
  f"Large model count detected ({n_configs} configs) ... " f"Only displaying the first 3 models of each family. To see all, set `verbosity=3`.\n"
  )
@@ -132,6 +134,9 @@ class AutoTrainer(AbstractTabularTrainer):
  log_str += "}"
  logger.log(20, log_str)

+ if label_cleaner is not None:
+ core_kwargs["label_cleaner"] = label_cleaner
+
  self._train_multi_and_ensemble(
  X=X,
  y=y,
autogluon/tabular/version.py

@@ -1,4 +1,4 @@
  """This is the autogluon version file."""

- __version__ = "1.4.1b20251014"
+ __version__ = "1.5.0b20251222"
  __lite__ = False
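
A quick check of which build is installed, using the module shown in this hunk:

    from autogluon.tabular.version import __version__
    print(__version__)  # "1.5.0b20251222" for the new wheel
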
autogluon.tabular-1.5.0b20251222-py3.11-nspkg.pth

@@ -0,0 +1 @@
+ import sys, types, os;p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('autogluon',));importlib = __import__('importlib.util');__import__('importlib.machinery');m = sys.modules.setdefault('autogluon', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('autogluon', [os.path.dirname(p)])));m = m or sys.modules.setdefault('autogluon', types.ModuleType('autogluon'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)
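
This one-liner appears to be the standard setuptools namespace-package bootstrap: at interpreter startup it splices the installed `autogluon/` directory onto the `autogluon` namespace package's `__path__` so that `autogluon.tabular` stays importable alongside the other autogluon sub-packages; it is regenerated here under the py3.11 name (the old py3.9 variant is removed).
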
{autogluon.tabular-1.4.1b20251014.dist-info → autogluon_tabular-1.5.0b20251222.dist-info}/METADATA

@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: autogluon.tabular
- Version: 1.4.1b20251014
+ Version: 1.5.0b20251222
  Summary: Fast and Accurate ML in 3 Lines of Code
  Home-page: https://github.com/autogluon/autogluon
  Author: AutoGluon Community
@@ -9,7 +9,6 @@ Project-URL: Documentation, https://auto.gluon.ai
  Project-URL: Bug Reports, https://github.com/autogluon/autogluon/issues
  Project-URL: Source, https://github.com/autogluon/autogluon/
  Project-URL: Contribute!, https://github.com/autogluon/autogluon/blob/master/CONTRIBUTING.md
- Platform: UNKNOWN
  Classifier: Development Status :: 4 - Beta
  Classifier: Intended Audience :: Education
  Classifier: Intended Audience :: Developers
@@ -24,121 +23,131 @@ Classifier: Operating System :: Microsoft :: Windows
  Classifier: Operating System :: POSIX
  Classifier: Operating System :: Unix
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
  Classifier: Topic :: Software Development
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Scientific/Engineering :: Information Analysis
  Classifier: Topic :: Scientific/Engineering :: Image Recognition
- Requires-Python: >=3.9, <3.13
+ Requires-Python: >=3.10, <3.14
  Description-Content-Type: text/markdown
- License-File: ../LICENSE
- License-File: ../NOTICE
+ License-File: LICENSE
+ License-File: NOTICE
  Requires-Dist: numpy<2.4.0,>=1.25.0
  Requires-Dist: scipy<1.17,>=1.5.4
  Requires-Dist: pandas<2.4.0,>=2.0.0
  Requires-Dist: scikit-learn<1.8.0,>=1.4.0
  Requires-Dist: networkx<4,>=3.0
- Requires-Dist: autogluon.core==1.4.1b20251014
- Requires-Dist: autogluon.features==1.4.1b20251014
- Provides-Extra: all
- Requires-Dist: catboost<1.3,>=1.2; extra == "all"
- Requires-Dist: autogluon.core[all]==1.4.1b20251014; extra == "all"
- Requires-Dist: fastai<2.9,>=2.3.1; extra == "all"
- Requires-Dist: loguru; extra == "all"
- Requires-Dist: numpy<2.3.0,>=1.25; extra == "all"
- Requires-Dist: xgboost<3.1,>=2.0; extra == "all"
- Requires-Dist: spacy<3.9; extra == "all"
- Requires-Dist: einops<0.9,>=0.7; extra == "all"
- Requires-Dist: omegaconf; extra == "all"
- Requires-Dist: torch<2.8,>=2.6; extra == "all"
- Requires-Dist: huggingface-hub[torch]; extra == "all"
- Requires-Dist: einx; extra == "all"
- Requires-Dist: lightgbm<4.7,>=4.0; extra == "all"
- Requires-Dist: transformers; extra == "all"
- Requires-Dist: blis<1.2.1,>=0.7.0; (platform_system == "Windows" and python_version == "3.9") and extra == "all"
+ Requires-Dist: autogluon.core==1.5.0b20251222
+ Requires-Dist: autogluon.features==1.5.0b20251222
+ Provides-Extra: lightgbm
+ Requires-Dist: lightgbm<4.7,>=4.0; extra == "lightgbm"
  Provides-Extra: catboost
- Requires-Dist: numpy<2.3.0,>=1.25; extra == "catboost"
  Requires-Dist: catboost<1.3,>=1.2; extra == "catboost"
+ Provides-Extra: xgboost
+ Requires-Dist: xgboost<3.2,>=2.0; extra == "xgboost"
+ Provides-Extra: realmlp
+ Requires-Dist: pytabkit<1.8,>=1.7.2; extra == "realmlp"
+ Provides-Extra: interpret
+ Requires-Dist: interpret-core<0.8,>=0.7.2; extra == "interpret"
  Provides-Extra: fastai
  Requires-Dist: spacy<3.9; extra == "fastai"
- Requires-Dist: torch<2.8,>=2.6; extra == "fastai"
+ Requires-Dist: torch<2.10,>=2.6; extra == "fastai"
  Requires-Dist: fastai<2.9,>=2.3.1; extra == "fastai"
- Requires-Dist: blis<1.2.1,>=0.7.0; (platform_system == "Windows" and python_version == "3.9") and extra == "fastai"
- Provides-Extra: imodels
- Requires-Dist: imodels<2.1.0,>=1.3.10; extra == "imodels"
- Provides-Extra: interpret
- Requires-Dist: interpret-core<0.8,>=0.7.2; extra == "interpret"
- Provides-Extra: lightgbm
- Requires-Dist: lightgbm<4.7,>=4.0; extra == "lightgbm"
+ Provides-Extra: tabm
+ Requires-Dist: torch<2.10,>=2.6; extra == "tabm"
+ Provides-Extra: tabpfn
+ Requires-Dist: tabpfn<6.2.1,>=6.2.0; extra == "tabpfn"
+ Provides-Extra: tabdpt
+ Requires-Dist: tabdpt<1.2,>=1.1.11; extra == "tabdpt"
+ Provides-Extra: tabpfnmix
+ Requires-Dist: torch<2.10,>=2.6; extra == "tabpfnmix"
+ Requires-Dist: huggingface_hub[torch]<1.0; extra == "tabpfnmix"
+ Requires-Dist: einops<0.9,>=0.7; extra == "tabpfnmix"
  Provides-Extra: mitra
  Requires-Dist: loguru; extra == "mitra"
  Requires-Dist: einx; extra == "mitra"
  Requires-Dist: omegaconf; extra == "mitra"
- Requires-Dist: torch<2.8,>=2.6; extra == "mitra"
+ Requires-Dist: torch<2.10,>=2.6; extra == "mitra"
  Requires-Dist: transformers; extra == "mitra"
- Requires-Dist: huggingface-hub[torch]; extra == "mitra"
+ Requires-Dist: huggingface_hub[torch]<1.0; extra == "mitra"
  Requires-Dist: einops<0.9,>=0.7; extra == "mitra"
+ Provides-Extra: tabicl
+ Requires-Dist: tabicl<0.2,>=0.1.4; extra == "tabicl"
  Provides-Extra: ray
- Requires-Dist: autogluon.core[all]==1.4.1b20251014; extra == "ray"
- Provides-Extra: realmlp
- Requires-Dist: pytabkit<1.7,>=1.6; extra == "realmlp"
+ Requires-Dist: autogluon.core[all]==1.5.0b20251222; extra == "ray"
  Provides-Extra: skex
- Requires-Dist: scikit-learn-intelex<2025.5,>=2024.0; extra == "skex"
+ Requires-Dist: scikit-learn-intelex<2025.10,>=2025.0; extra == "skex"
+ Provides-Extra: imodels
+ Requires-Dist: imodels<2.1.0,>=1.3.10; extra == "imodels"
  Provides-Extra: skl2onnx
- Requires-Dist: skl2onnx<1.18.0,>=1.15.0; extra == "skl2onnx"
- Requires-Dist: onnxruntime<1.20.0,>=1.17.0; extra == "skl2onnx"
- Requires-Dist: onnxruntime-gpu<1.20.0,>=1.17.0; extra == "skl2onnx"
- Requires-Dist: onnx<1.18.0,>=1.13.0; platform_system != "Windows" and extra == "skl2onnx"
- Requires-Dist: onnx<1.16.2,>=1.13.0; platform_system == "Windows" and extra == "skl2onnx"
+ Requires-Dist: skl2onnx<1.20.0,>=1.15.0; extra == "skl2onnx"
+ Requires-Dist: onnx!=1.16.2,<1.21.0,>=1.13.0; platform_system == "Windows" and extra == "skl2onnx"
+ Requires-Dist: onnx<1.21.0,>=1.13.0; platform_system != "Windows" and extra == "skl2onnx"
+ Requires-Dist: onnxruntime<1.24.0,>=1.17.0; extra == "skl2onnx"
+ Requires-Dist: onnxruntime-gpu<1.24.0,>=1.17.0; (platform_system != "Darwin" and platform_machine != "aarch64") and extra == "skl2onnx"
+ Provides-Extra: all
+ Requires-Dist: omegaconf; extra == "all"
+ Requires-Dist: transformers; extra == "all"
+ Requires-Dist: fastai<2.9,>=2.3.1; extra == "all"
+ Requires-Dist: einx; extra == "all"
+ Requires-Dist: autogluon.core[all]==1.5.0b20251222; extra == "all"
+ Requires-Dist: huggingface_hub[torch]<1.0; extra == "all"
+ Requires-Dist: einops<0.9,>=0.7; extra == "all"
+ Requires-Dist: torch<2.10,>=2.6; extra == "all"
+ Requires-Dist: spacy<3.9; extra == "all"
+ Requires-Dist: xgboost<3.2,>=2.0; extra == "all"
+ Requires-Dist: lightgbm<4.7,>=4.0; extra == "all"
+ Requires-Dist: catboost<1.3,>=1.2; extra == "all"
+ Requires-Dist: loguru; extra == "all"
  Provides-Extra: tabarena
- Requires-Dist: xgboost<3.1,>=2.0; extra == "tabarena"
- Requires-Dist: lightgbm<4.7,>=4.0; extra == "tabarena"
+ Requires-Dist: pytabkit<1.8,>=1.7.2; extra == "tabarena"
+ Requires-Dist: omegaconf; extra == "tabarena"
+ Requires-Dist: tabpfn<6.2.1,>=6.2.0; extra == "tabarena"
+ Requires-Dist: interpret-core<0.8,>=0.7.2; extra == "tabarena"
  Requires-Dist: transformers; extra == "tabarena"
  Requires-Dist: fastai<2.9,>=2.3.1; extra == "tabarena"
+ Requires-Dist: tabicl<0.2,>=0.1.4; extra == "tabarena"
  Requires-Dist: einx; extra == "tabarena"
- Requires-Dist: tabicl<0.2,>=0.1.3; extra == "tabarena"
+ Requires-Dist: autogluon.core[all]==1.5.0b20251222; extra == "tabarena"
+ Requires-Dist: huggingface_hub[torch]<1.0; extra == "tabarena"
+ Requires-Dist: einops<0.9,>=0.7; extra == "tabarena"
+ Requires-Dist: torch<2.10,>=2.6; extra == "tabarena"
+ Requires-Dist: tabdpt<1.2,>=1.1.11; extra == "tabarena"
+ Requires-Dist: spacy<3.9; extra == "tabarena"
+ Requires-Dist: xgboost<3.2,>=2.0; extra == "tabarena"
+ Requires-Dist: lightgbm<4.7,>=4.0; extra == "tabarena"
  Requires-Dist: catboost<1.3,>=1.2; extra == "tabarena"
  Requires-Dist: loguru; extra == "tabarena"
- Requires-Dist: tabpfn<2.2,>=2.0.9; extra == "tabarena"
- Requires-Dist: omegaconf; extra == "tabarena"
- Requires-Dist: pytabkit<1.7,>=1.6; extra == "tabarena"
- Requires-Dist: autogluon.core[all]==1.4.1b20251014; extra == "tabarena"
- Requires-Dist: numpy<2.3.0,>=1.25; extra == "tabarena"
- Requires-Dist: spacy<3.9; extra == "tabarena"
- Requires-Dist: interpret-core<0.8,>=0.7.2; extra == "tabarena"
- Requires-Dist: torch<2.8,>=2.6; extra == "tabarena"
- Requires-Dist: einops<0.9,>=0.7; extra == "tabarena"
- Requires-Dist: huggingface-hub[torch]; extra == "tabarena"
- Requires-Dist: blis<1.2.1,>=0.7.0; (platform_system == "Windows" and python_version == "3.9") and extra == "tabarena"
- Provides-Extra: tabicl
- Requires-Dist: tabicl<0.2,>=0.1.3; extra == "tabicl"
- Provides-Extra: tabm
- Requires-Dist: torch<2.8,>=2.6; extra == "tabm"
- Provides-Extra: tabpfn
- Requires-Dist: tabpfn<2.2,>=2.0.9; extra == "tabpfn"
- Provides-Extra: tabpfnmix
- Requires-Dist: torch<2.8,>=2.6; extra == "tabpfnmix"
- Requires-Dist: huggingface-hub[torch]; extra == "tabpfnmix"
- Requires-Dist: einops<0.9,>=0.7; extra == "tabpfnmix"
  Provides-Extra: tests
  Requires-Dist: interpret-core<0.8,>=0.7.2; extra == "tests"
- Requires-Dist: tabicl<0.2,>=0.1.3; extra == "tests"
- Requires-Dist: tabpfn<2.2,>=2.0.9; extra == "tests"
- Requires-Dist: pytabkit<1.7,>=1.6; extra == "tests"
- Requires-Dist: torch<2.8,>=2.6; extra == "tests"
- Requires-Dist: huggingface-hub[torch]; extra == "tests"
+ Requires-Dist: tabdpt<1.2,>=1.1.11; extra == "tests"
+ Requires-Dist: tabicl<0.2,>=0.1.4; extra == "tests"
+ Requires-Dist: tabpfn<6.2.1,>=6.2.0; extra == "tests"
+ Requires-Dist: pytabkit<1.8,>=1.7.2; extra == "tests"
+ Requires-Dist: torch<2.10,>=2.6; extra == "tests"
+ Requires-Dist: huggingface_hub[torch]<1.0; extra == "tests"
  Requires-Dist: einops<0.9,>=0.7; extra == "tests"
  Requires-Dist: imodels<2.1.0,>=1.3.10; extra == "tests"
- Requires-Dist: skl2onnx<1.18.0,>=1.15.0; extra == "tests"
- Requires-Dist: onnxruntime<1.20.0,>=1.17.0; extra == "tests"
- Requires-Dist: onnxruntime-gpu<1.20.0,>=1.17.0; extra == "tests"
- Requires-Dist: onnx<1.18.0,>=1.13.0; platform_system != "Windows" and extra == "tests"
- Requires-Dist: onnx<1.16.2,>=1.13.0; platform_system == "Windows" and extra == "tests"
- Provides-Extra: xgboost
- Requires-Dist: xgboost<3.1,>=2.0; extra == "xgboost"
+ Requires-Dist: skl2onnx<1.20.0,>=1.15.0; extra == "tests"
+ Requires-Dist: onnx!=1.16.2,<1.21.0,>=1.13.0; platform_system == "Windows" and extra == "tests"
+ Requires-Dist: onnx<1.21.0,>=1.13.0; platform_system != "Windows" and extra == "tests"
+ Requires-Dist: onnxruntime<1.24.0,>=1.17.0; extra == "tests"
+ Requires-Dist: onnxruntime-gpu<1.24.0,>=1.17.0; (platform_system != "Darwin" and platform_machine != "aarch64") and extra == "tests"
+ Dynamic: author
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license
+ Dynamic: license-file
+ Dynamic: project-url
+ Dynamic: provides-extra
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
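
The dependency block above splits the former monolithic extras into per-model optional extras (lightgbm, catboost, xgboost, realmlp, interpret, tabm, tabpfn, tabdpt, tabpfnmix, tabicl, mitra), raises the torch pin to <2.10, drops the Python 3.9-only blis pin, and moves TabPFN to the 6.x series. Assuming the wheel is installed from PyPI as usual, a single extra can be pulled in with, e.g., `pip install "autogluon.tabular[tabdpt]"`, while `[all]` continues to install the full model suite.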

@@ -149,7 +158,7 @@ Requires-Dist: xgboost<3.1,>=2.0; extra == "xgboost"

  [![Latest Release](https://img.shields.io/github/v/release/autogluon/autogluon)](https://github.com/autogluon/autogluon/releases)
  [![Conda Forge](https://img.shields.io/conda/vn/conda-forge/autogluon.svg)](https://anaconda.org/conda-forge/autogluon)
- [![Python Versions](https://img.shields.io/badge/python-3.9%20%7C%203.10%20%7C%203.11%20%7C%203.12-blue)](https://pypi.org/project/autogluon/)
+ [![Python Versions](https://img.shields.io/badge/python-3.10%20%7C%203.11%20%7C%203.12%20%7C%203.13-blue)](https://pypi.org/project/autogluon/)
  [![Downloads](https://pepy.tech/badge/autogluon/month)](https://pepy.tech/project/autogluon)
  [![GitHub license](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](./LICENSE)
  [![Discord](https://img.shields.io/discord/1043248669505368144?color=7289da&label=Discord&logo=discord&logoColor=ffffff)](https://discord.gg/wjUmjqAc2N)
@@ -166,7 +175,7 @@ AutoGluon, developed by AWS AI, automates machine learning tasks enabling you to

  ## 💾 Installation

- AutoGluon is supported on Python 3.9 - 3.12 and is available on Linux, MacOS, and Windows.
+ AutoGluon is supported on Python 3.10 - 3.13 and is available on Linux, MacOS, and Windows.

  You can install AutoGluon with:

@@ -189,8 +198,8 @@ predictions = predictor.predict("test.csv")
  | AutoGluon Task | Quickstart | API |
  |:--------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------:|
  | TabularPredictor | [![Quick Start](https://img.shields.io/static/v1?label=&message=tutorial&color=grey)](https://auto.gluon.ai/stable/tutorials/tabular/tabular-quick-start.html) | [![API](https://img.shields.io/badge/api-reference-blue.svg)](https://auto.gluon.ai/stable/api/autogluon.tabular.TabularPredictor.html) |
- | MultiModalPredictor | [![Quick Start](https://img.shields.io/static/v1?label=&message=tutorial&color=grey)](https://auto.gluon.ai/stable/tutorials/multimodal/multimodal_prediction/multimodal-quick-start.html) | [![API](https://img.shields.io/badge/api-reference-blue.svg)](https://auto.gluon.ai/stable/api/autogluon.multimodal.MultiModalPredictor.html) |
  | TimeSeriesPredictor | [![Quick Start](https://img.shields.io/static/v1?label=&message=tutorial&color=grey)](https://auto.gluon.ai/stable/tutorials/timeseries/forecasting-quick-start.html) | [![API](https://img.shields.io/badge/api-reference-blue.svg)](https://auto.gluon.ai/stable/api/autogluon.timeseries.TimeSeriesPredictor.html) |
+ | MultiModalPredictor | [![Quick Start](https://img.shields.io/static/v1?label=&message=tutorial&color=grey)](https://auto.gluon.ai/stable/tutorials/multimodal/multimodal_prediction/multimodal-quick-start.html) | [![API](https://img.shields.io/badge/api-reference-blue.svg)](https://auto.gluon.ai/stable/api/autogluon.multimodal.MultiModalPredictor.html) |

  ## :mag: Resources

@@ -213,7 +222,10 @@ Below is a curated list of recent tutorials and talks on AutoGluon. A comprehens
  - [Benchmarking Multimodal AutoML for Tabular Data with Text Fields](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/file/9bf31c7ff062936a96d3c8bd1f8f2ff3-Paper-round2.pdf) (*NeurIPS*, 2021) ([BibTeX](CITING.md#autogluonmultimodal))
  - [XTab: Cross-table Pretraining for Tabular Transformers](https://proceedings.mlr.press/v202/zhu23k/zhu23k.pdf) (*ICML*, 2023)
  - [AutoGluon-TimeSeries: AutoML for Probabilistic Time Series Forecasting](https://arxiv.org/abs/2308.05566) (*AutoML Conf*, 2023) ([BibTeX](CITING.md#autogluontimeseries))
- - [TabRepo: A Large Scale Repository of Tabular Model Evaluations and its AutoML Applications](https://arxiv.org/pdf/2311.02971.pdf) (*Under Review*, 2024)
+ - [TabRepo: A Large Scale Repository of Tabular Model Evaluations and its AutoML Applications](https://arxiv.org/pdf/2311.02971.pdf) (*AutoML Conf*, 2024)
+ - [AutoGluon-Multimodal (AutoMM): Supercharging Multimodal AutoML with Foundation Models](https://arxiv.org/pdf/2404.16233) (*AutoML Conf*, 2024) ([BibTeX](CITING.md#autogluonmultimodal))
+ - [Multi-layer Stack Ensembles for Time Series Forecasting](https://arxiv.org/abs/2511.15350) (*AutoML Conf*, 2025) ([BibTeX](CITING.md#autogluontimeseries))
+ - [Chronos-2: From Univariate to Universal Forecasting](https://arxiv.org/abs/2510.15821) (*Arxiv*, 2025) ([BibTeX](CITING.md#autogluontimeseries))

  ### Articles
  - [AutoGluon-TimeSeries: Every Time Series Forecasting Model In One Library](https://towardsdatascience.com/autogluon-timeseries-every-time-series-forecasting-model-in-one-library-29a3bf6879db) (*Towards Data Science*, Jan 2024)
@@ -239,5 +251,3 @@ We are actively accepting code contributions to the AutoGluon project. If you ar
  ## :classical_building: License

  This library is licensed under the Apache 2.0 License.
-
-