autogluon.timeseries 1.4.1b20251206__tar.gz → 1.5.1b20260117__tar.gz

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of autogluon.timeseries might be problematic.

Files changed (109)
  1. {autogluon_timeseries-1.4.1b20251206/src/autogluon.timeseries.egg-info → autogluon_timeseries-1.5.1b20260117}/PKG-INFO +8 -8
  2. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/setup.py +1 -1
  3. autogluon_timeseries-1.5.1b20260117/src/autogluon/timeseries/configs/hyperparameter_presets.py +47 -0
  4. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/configs/predictor_presets.py +23 -39
  5. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/learner.py +3 -1
  6. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +0 -11
  7. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/abstract/model_trial.py +2 -1
  8. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/autogluon_tabular/per_step.py +1 -1
  9. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/chronos/chronos2.py +52 -10
  10. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/chronos/model.py +7 -2
  11. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/chronos/utils.py +1 -1
  12. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/__init__.py +13 -11
  13. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/abstract.py +9 -4
  14. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/array_based/abstract.py +10 -6
  15. autogluon_timeseries-1.5.1b20260117/src/autogluon/timeseries/models/ensemble/array_based/models.py +185 -0
  16. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +1 -1
  17. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +24 -5
  18. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/per_item_greedy.py +16 -6
  19. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/weighted/abstract.py +6 -1
  20. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/weighted/basic.py +20 -7
  21. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/weighted/greedy.py +11 -4
  22. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/gluonts/abstract.py +7 -6
  23. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/gluonts/models.py +0 -7
  24. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/local/npts.py +6 -0
  25. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/local/statsforecast.py +1 -1
  26. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +20 -6
  27. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/hf_pretrained_model.py +95 -13
  28. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/model.py +16 -3
  29. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/predictor.py +246 -63
  30. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/trainer/ensemble_composer.py +6 -1
  31. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/trainer/trainer.py +53 -66
  32. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/features.py +13 -0
  33. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/version.py +1 -1
  34. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117/src/autogluon.timeseries.egg-info}/PKG-INFO +8 -8
  35. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon.timeseries.egg-info/requires.txt +7 -7
  36. autogluon_timeseries-1.4.1b20251206/src/autogluon/timeseries/configs/hyperparameter_presets.py +0 -62
  37. autogluon_timeseries-1.4.1b20251206/src/autogluon/timeseries/models/ensemble/array_based/models.py +0 -73
  38. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/LICENSE +0 -0
  39. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/NOTICE +0 -0
  40. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/README.md +0 -0
  41. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/setup.cfg +0 -0
  42. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/__init__.py +0 -0
  43. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/configs/__init__.py +0 -0
  44. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/dataset/__init__.py +0 -0
  45. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/dataset/ts_dataframe.py +0 -0
  46. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/metrics/__init__.py +0 -0
  47. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/metrics/abstract.py +0 -0
  48. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/metrics/point.py +0 -0
  49. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/metrics/quantile.py +0 -0
  50. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/metrics/utils.py +0 -0
  51. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/__init__.py +0 -0
  52. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
  53. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/abstract/tunable.py +0 -0
  54. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
  55. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +0 -0
  56. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/autogluon_tabular/transforms.py +0 -0
  57. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
  58. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/chronos/__init__.py +0 -0
  59. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/array_based/__init__.py +0 -0
  60. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +0 -0
  61. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +0 -0
  62. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +0 -0
  63. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/ensemble_selection.py +0 -0
  64. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/ensemble/weighted/__init__.py +0 -0
  65. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/gluonts/__init__.py +0 -0
  66. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/gluonts/dataset.py +0 -0
  67. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/local/__init__.py +0 -0
  68. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/local/abstract_local_model.py +0 -0
  69. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/local/naive.py +0 -0
  70. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
  71. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/registry.py +0 -0
  72. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/__init__.py +0 -0
  73. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/__init__.py +0 -0
  74. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/__init__.py +0 -0
  75. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/attention.py +0 -0
  76. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/backbone.py +0 -0
  77. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/distribution.py +0 -0
  78. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +0 -0
  79. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/rope.py +0 -0
  80. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +0 -0
  81. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/scaler.py +0 -0
  82. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/backbone/transformer.py +0 -0
  83. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/dataset.py +0 -0
  84. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/_internal/forecaster.py +0 -0
  85. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/models/toto/dataloader.py +0 -0
  86. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/regressor.py +0 -0
  87. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/splitter.py +0 -0
  88. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/trainer/__init__.py +0 -0
  89. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/trainer/model_set_builder.py +0 -0
  90. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/trainer/prediction_cache.py +0 -0
  91. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/trainer/utils.py +0 -0
  92. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/transforms/__init__.py +0 -0
  93. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/transforms/covariate_scaler.py +0 -0
  94. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/transforms/target_scaler.py +0 -0
  95. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/__init__.py +0 -0
  96. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/constants.py +0 -0
  97. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
  98. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
  99. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/datetime/lags.py +0 -0
  100. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/datetime/seasonality.py +0 -0
  101. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
  102. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/forecast.py +0 -0
  103. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/timer.py +0 -0
  104. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon/timeseries/utils/warning_filters.py +0 -0
  105. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon.timeseries.egg-info/SOURCES.txt +0 -0
  106. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
  107. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
  108. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
  109. {autogluon_timeseries-1.4.1b20251206 → autogluon_timeseries-1.5.1b20260117}/src/autogluon.timeseries.egg-info/zip-safe +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autogluon.timeseries
-Version: 1.4.1b20251206
+Version: 1.5.1b20260117
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -53,22 +53,22 @@ Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
 Requires-Dist: einops<1,>=0.7
-Requires-Dist: chronos-forecasting<3,>=2.2.0rc4
+Requires-Dist: chronos-forecasting<2.4,>=2.2.2
 Requires-Dist: peft<0.18,>=0.13.0
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core==1.4.1b20251206
-Requires-Dist: autogluon.common==1.4.1b20251206
-Requires-Dist: autogluon.features==1.4.1b20251206
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.4.1b20251206
+Requires-Dist: autogluon.core==1.5.1b20260117
+Requires-Dist: autogluon.common==1.5.1b20260117
+Requires-Dist: autogluon.features==1.5.1b20260117
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.5.1b20260117
 Provides-Extra: tests
 Requires-Dist: pytest; extra == "tests"
 Requires-Dist: ruff>=0.0.285; extra == "tests"
 Requires-Dist: flaky<4,>=3.7; extra == "tests"
 Requires-Dist: pytest-timeout<3,>=2.1; extra == "tests"
 Provides-Extra: ray
-Requires-Dist: autogluon.core[raytune]==1.4.1b20251206; extra == "ray"
+Requires-Dist: autogluon.core[raytune]==1.5.1b20260117; extra == "ray"
 Provides-Extra: all
-Requires-Dist: autogluon.core[raytune]==1.4.1b20251206; extra == "all"
+Requires-Dist: autogluon.core[raytune]==1.5.1b20260117; extra == "all"
 Dynamic: author
 Dynamic: classifier
 Dynamic: description

setup.py
@@ -42,7 +42,7 @@ install_requires = [
     "tqdm", # version range defined in `core/_setup_utils.py`
     "orjson~=3.9", # use faster JSON implementation in GluonTS
     "einops>=0.7,<1", # required by Chronos-2 and Toto
-    "chronos-forecasting>=2.2.0rc4,<3",
+    "chronos-forecasting>=2.2.2,<2.4",
     "peft>=0.13.0,<0.18", # version range same as in chronos-forecasting[extras]
     "tensorboard>=2.9,<3", # fixes https://github.com/autogluon/autogluon/issues/3612
     f"autogluon.core=={version}",

src/autogluon/timeseries/configs/hyperparameter_presets.py (new file)
@@ -0,0 +1,47 @@
+from typing import Any
+
+
+def get_hyperparameter_presets() -> dict[str, dict[str, dict[str, Any] | list[dict[str, Any]]]]:
+    return {
+        "very_light": {
+            "Naive": {},
+            "SeasonalNaive": {},
+            "ETS": {},
+            "Theta": {},
+            "RecursiveTabular": {"max_num_samples": 100_000},
+            "DirectTabular": {"max_num_samples": 100_000},
+        },
+        "light": {
+            "SeasonalNaive": {},
+            "ETS": {},
+            "Theta": {},
+            "RecursiveTabular": {},
+            "DirectTabular": {},
+            "TemporalFusionTransformer": {},
+            "Chronos2": {"model_path": "autogluon/chronos-2-small"},
+        },
+        "default": {
+            "SeasonalNaive": {},
+            "AutoETS": {},
+            "DynamicOptimizedTheta": {},
+            "RecursiveTabular": {},
+            "DirectTabular": {},
+            "TemporalFusionTransformer": {},
+            "DeepAR": {},
+            "Chronos2": [
+                {},
+                {
+                    "ag_args": {"name_suffix": "SmallFineTuned"},
+                    "model_path": "autogluon/chronos-2-small",
+                    "fine_tune": True,
+                    "eval_during_fine_tune": True,
+                },
+            ],
+            "Chronos": {
+                "ag_args": {"name_suffix": "WithRegressor"},
+                "model_path": "bolt_small",
+                "target_scaler": "standard",
+                "covariate_regressor": {"model_name": "CAT", "model_hyperparameters": {"iterations": 1000}},
+            },
+        },
+    }
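
Note: these preset dictionaries back the string values accepted by the hyperparameters argument of TimeSeriesPredictor.fit. A minimal usage sketch (the file path and prediction length are placeholders):

from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

# "very_light" trains only fast local/tabular models, "light" adds TFT and a
# small Chronos-2 model, and "default" is the full preset shown above.
train_data = TimeSeriesDataFrame.from_path("train.csv")  # placeholder path
predictor = TimeSeriesPredictor(prediction_length=48)
predictor.fit(train_data, hyperparameters="light")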

src/autogluon/timeseries/configs/predictor_presets.py
@@ -2,10 +2,7 @@
 
 from typing import Any
 
-from . import get_hyperparameter_presets
-
 TIMESERIES_PRESETS_ALIASES = dict(
-    chronos="chronos_small",
     best="best_quality",
     high="high_quality",
     medium="medium_quality",
@@ -16,13 +13,33 @@ TIMESERIES_PRESETS_ALIASES = dict(
 
 
 def get_predictor_presets() -> dict[str, Any]:
-    hp_presets = get_hyperparameter_presets()
-
     predictor_presets = dict(
-        best_quality={"hyperparameters": "default", "num_val_windows": 2},
+        best_quality={"hyperparameters": "default", "num_val_windows": "auto", "refit_every_n_windows": "auto"},
         high_quality={"hyperparameters": "default"},
         medium_quality={"hyperparameters": "light"},
         fast_training={"hyperparameters": "very_light"},
+        # Chronos-2 models
+        chronos2={
+            "hyperparameters": {"Chronos2": {"model_path": "autogluon/chronos-2"}},
+            "skip_model_selection": True,
+        },
+        chronos2_small={
+            "hyperparameters": {"Chronos2": {"model_path": "autogluon/chronos-2-small"}},
+            "skip_model_selection": True,
+        },
+        chronos2_ensemble={
+            "hyperparameters": {
+                "Chronos2": [
+                    {"model_path": "autogluon/chronos-2", "ag_args": {"name_suffix": "ZeroShot"}},
+                    {
+                        "model_path": "autogluon/chronos-2-small",
+                        "fine_tune": True,
+                        "eval_during_fine_tune": True,
+                        "ag_args": {"name_suffix": "SmallFineTuned"},
+                    },
+                ]
+            },
+        },
         # Chronos-Bolt models
         bolt_tiny={
             "hyperparameters": {"Chronos": {"model_path": "bolt_tiny"}},
@@ -40,39 +57,6 @@ def get_predictor_presets() -> dict[str, Any]:
             "hyperparameters": {"Chronos": {"model_path": "bolt_base"}},
             "skip_model_selection": True,
         },
-        # Original Chronos models
-        chronos_tiny={
-            "hyperparameters": {"Chronos": {"model_path": "tiny"}},
-            "skip_model_selection": True,
-        },
-        chronos_mini={
-            "hyperparameters": {"Chronos": {"model_path": "mini"}},
-            "skip_model_selection": True,
-        },
-        chronos_small={
-            "hyperparameters": {"Chronos": {"model_path": "small"}},
-            "skip_model_selection": True,
-        },
-        chronos_base={
-            "hyperparameters": {"Chronos": {"model_path": "base"}},
-            "skip_model_selection": True,
-        },
-        chronos_large={
-            "hyperparameters": {"Chronos": {"model_path": "large", "batch_size": 8}},
-            "skip_model_selection": True,
-        },
-        chronos_ensemble={
-            "hyperparameters": {
-                "Chronos": {"model_path": "small"},
-                **hp_presets["light_inference"],
-            }
-        },
-        chronos_large_ensemble={
-            "hyperparameters": {
-                "Chronos": {"model_path": "large", "batch_size": 8},
-                **hp_presets["light_inference"],
-            }
-        },
     )
 
     # update with aliases
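
Note: the new chronos2, chronos2_small, and chronos2_ensemble presets replace the removed chronos_* presets, and the chronos alias (previously resolving to chronos_small) is gone as well. A hedged sketch of the new entry points (train_data as in the previous example):

from autogluon.timeseries import TimeSeriesPredictor

# Single zero-shot Chronos-2 model; skip_model_selection=True means no
# competing models are trained and no ensembling is performed.
predictor = TimeSeriesPredictor(prediction_length=48)
predictor.fit(train_data, presets="chronos2")

# Zero-shot Chronos-2 plus a fine-tuned small variant:
# predictor.fit(train_data, presets="chronos2_ensemble")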

src/autogluon/timeseries/learner.py
@@ -58,8 +58,9 @@ class TimeSeriesLearner(AbstractLearner):
         hyperparameters: str | dict,
         val_data: TimeSeriesDataFrame | None = None,
         hyperparameter_tune_kwargs: str | dict | None = None,
+        ensemble_hyperparameters: dict[str, Any] | list[dict[str, Any]] | None = None,
         time_limit: float | None = None,
-        num_val_windows: int = 1,
+        num_val_windows: tuple[int, ...] = (1,),
         val_step_size: int | None = None,
         refit_every_n_windows: int | None = 1,
         random_seed: int | None = None,
@@ -112,6 +113,7 @@ class TimeSeriesLearner(AbstractLearner):
             val_data=val_data,
             hyperparameters=hyperparameters,
             hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
+            ensemble_hyperparameters=ensemble_hyperparameters,
             excluded_model_types=kwargs.get("excluded_model_types"),
             time_limit=time_limit,
             random_seed=random_seed,

src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py
@@ -668,7 +668,6 @@ class AbstractTimeSeriesModel(TimeSeriesModelBase, TimeSeriesTunable, metaclass=
         pass
 
     def _preprocess_time_limit(self, time_limit: float) -> float:
-        original_time_limit = time_limit
         max_time_limit_ratio = self._extra_ag_args.get("max_time_limit_ratio", self.default_max_time_limit_ratio)
         max_time_limit = self._extra_ag_args.get("max_time_limit")
 
@@ -677,16 +676,6 @@ class AbstractTimeSeriesModel(TimeSeriesModelBase, TimeSeriesTunable, metaclass=
         if max_time_limit is not None:
             time_limit = min(time_limit, max_time_limit)
 
-        if original_time_limit != time_limit:
-            time_limit_og_str = f"{original_time_limit:.2f}s" if original_time_limit is not None else "None"
-            time_limit_str = f"{time_limit:.2f}s" if time_limit is not None else "None"
-            logger.debug(
-                f"\tTime limit adjusted due to model hyperparameters: "
-                f"{time_limit_og_str} -> {time_limit_str} "
-                f"(ag.max_time_limit={max_time_limit}, "
-                f"ag.max_time_limit_ratio={max_time_limit_ratio}"
-            )
-
         return time_limit
 
     def _get_search_space(self):
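
Note: only the debug log about the adjustment is removed here; the clipping itself survives. A standalone sketch of the rule, assuming (as the removed log message suggests) that the ratio is applied in the elided context lines between the two hunks:

def preprocess_time_limit(
    time_limit: float,
    max_time_limit_ratio: float = 1.0,
    max_time_limit: float | None = None,
) -> float:
    # Scale the budget by the per-model ratio, then cap it at the absolute limit.
    time_limit = time_limit * max_time_limit_ratio
    if max_time_limit is not None:
        time_limit = min(time_limit, max_time_limit)
    return time_limit

assert preprocess_time_limit(100.0, max_time_limit_ratio=0.9, max_time_limit=60.0) == 60.0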

src/autogluon/timeseries/models/abstract/model_trial.py
@@ -76,7 +76,8 @@ def fit_and_save_model(model, fit_kwargs, train_data, val_data, eval_metric, tim
     time_fit_start = time.time()
     model.fit(train_data=train_data, val_data=val_data, time_limit=time_left, **fit_kwargs)
     model.fit_time = time.time() - time_fit_start
-    model.score_and_cache_oof(val_data, store_val_score=True, store_predict_time=True)
+    if val_data is not None:
+        model.score_and_cache_oof(val_data, store_val_score=True, store_predict_time=True)
 
     logger.debug(f"\tHyperparameter tune run: {model.name}")
     logger.debug(f"\t\t{model.val_score:<7.4f}".ljust(15) + f"= Validation score ({eval_metric.name_with_sign})")

src/autogluon/timeseries/models/autogluon_tabular/per_step.py
@@ -80,7 +80,7 @@ class PerStepTabularModel(AbstractTimeSeriesModel):
         If None, automatically determined based on available memory to prevent OOM errors.
     """
 
-    ag_priority = 70
+    ag_priority = 80
     _dummy_freq = "D"
 
     def __init__(self, *args, **kwargs):

src/autogluon/timeseries/models/chronos/chronos2.py
@@ -74,9 +74,16 @@ class Chronos2Model(AbstractTimeSeriesModel):
         a default configuration will be used. Example: ``{"r": 8, "lora_alpha": 16}``.
     fine_tune_trainer_kwargs : dict, optional
         Extra keyword arguments passed to ``transformers.TrainingArguments``
+    revision : str, default = None
+        Model revision to use (branch name or commit hash). If None, the default branch (usually "main") is used.
+    disable_known_covariates : bool, default = False
+        If True, known covariates won't be used by the model even if they are present in the dataset.
+    disable_past_covariates : bool, default = False
+        If True, past covariates won't be used by the model even if they are present in the dataset.
     """
 
     ag_model_aliases = ["Chronos-2"]
+    ag_priority = 75
     fine_tuned_ckpt_name: str = "fine-tuned-ckpt"
 
     _supports_known_covariates = True
@@ -140,7 +147,7 @@
            self.load_model_pipeline()
 
         # NOTE: This must be placed after load_model_pipeline to ensure that the loggers are available in loggerDict
-        self._update_transformers_loggers(logging.ERROR if verbosity <= 3 else logging.INFO)
+        self._update_transformers_loggers(logging.ERROR if verbosity <= 3 else logging.WARNING)
 
         if self.get_hyperparameter("fine_tune"):
             self._fine_tune(train_data, val_data, time_limit=time_limit, verbosity=verbosity)
@@ -172,6 +179,9 @@
             "eval_during_fine_tune": False,
             "fine_tune_eval_max_items": 256,
             "fine_tune_lora_config": None,
+            "revision": None,
+            "disable_known_covariates": False,
+            "disable_past_covariates": False,
         }
 
     @property
@@ -192,14 +202,35 @@
             "fine_tune_eval_max_items",
             "fine_tune_lora_config",
             "fine_tune_trainer_kwargs",
+            "revision",
+            "disable_known_covariates",
+            "disable_past_covariates",
         ]
 
+    def _remove_disabled_covariates(
+        self, past_df: pd.DataFrame, future_df: pd.DataFrame | None
+    ) -> tuple[pd.DataFrame, pd.DataFrame | None]:
+        """Remove covariates from dataframes based on disable flags."""
+        cols_to_remove = []
+        if self.get_hyperparameter("disable_past_covariates"):
+            cols_to_remove.extend(self.covariate_metadata.past_covariates)
+        if self.get_hyperparameter("disable_known_covariates"):
+            cols_to_remove.extend(self.covariate_metadata.known_covariates)
+            future_df = None
+
+        if cols_to_remove:
+            past_df = past_df.drop(columns=cols_to_remove)
+
+        return past_df, future_df
+
     def _predict(
         self,
         data: TimeSeriesDataFrame,
         known_covariates: TimeSeriesDataFrame | None = None,
         **kwargs,
     ) -> TimeSeriesDataFrame:
+        from .utils import timeout_callback
+
         if self._model_pipeline is None:
             self.load_model_pipeline()
         assert self._model_pipeline is not None
@@ -220,6 +251,9 @@
         cross_learning = self.get_hyperparameter("cross_learning")
         context_length = self.get_hyperparameter("context_length")
         future_df = known_covariates.reset_index().to_data_frame() if known_covariates is not None else None
+        time_limit = kwargs.get("time_limit")
+
+        context_df, future_df = self._remove_disabled_covariates(context_df, future_df)
 
         forecast_df = self._model_pipeline.predict_df(
             df=context_df,
@@ -231,6 +265,7 @@
             batch_size=batch_size,
             validate_inputs=False,
             cross_learning=cross_learning,
+            after_batch=timeout_callback(time_limit),
         )
 
         forecast_df = forecast_df.rename(columns={"predictions": "mean"}).drop(columns="target_name")
@@ -243,7 +278,11 @@
         device = (self.get_hyperparameter("device") or "cuda") if self._is_gpu_available() else "cpu"
 
         assert self.model_path is not None
-        pipeline = Chronos2Pipeline.from_pretrained(self.model_path, device_map=device)
+        pipeline = Chronos2Pipeline.from_pretrained(
+            self.model_path,
+            device_map=device,
+            revision=self.get_hyperparameter("revision"),
+        )
 
         self._model_pipeline = pipeline
 
@@ -269,8 +308,11 @@
         from .utils import LoggerCallback, TimeLimitCallback
 
         def convert_data(df: TimeSeriesDataFrame):
+            past_df = df.reset_index().to_data_frame()
+            past_df, _ = self._remove_disabled_covariates(past_df, None)
+
             inputs, _, _ = convert_df_input_to_list_of_dicts_input(
-                df=df.reset_index().to_data_frame(),
+                df=past_df,
                 future_df=None,
                 target_columns=[self.target],
                 prediction_length=self.prediction_length,
@@ -280,13 +322,13 @@
             # The above utility will only split the dataframe into target and past_covariates, where past_covariates contains
             # past values of both past-only and known-future covariates. We need to add future_covariates to enable fine-tuning
            # with known covariates by indicating which covariates are known in the future.
-            known_covariates = self.covariate_metadata.known_covariates
-
-            if len(known_covariates) > 0:
-                for input_dict in inputs:
-                    # NOTE: the covariates are empty because the actual values are not used
-                    # This only indicates which covariates are known in the future
-                    input_dict["future_covariates"] = {name: np.array([]) for name in known_covariates}
+            if not self.get_hyperparameter("disable_known_covariates"):
+                known_covariates = self.covariate_metadata.known_covariates
+                if len(known_covariates) > 0:
+                    for input_dict in inputs:
+                        # NOTE: the covariates are empty because the actual values are not used
+                        # This only indicates which covariates are known in the future
+                        input_dict["future_covariates"] = {name: np.array([]) for name in known_covariates}
 
             return inputs
 
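Note: the new revision, disable_known_covariates, and disable_past_covariates hyperparameters are set through the usual per-model hyperparameter dict. A hedged sketch (the revision string is a placeholder):

from autogluon.timeseries import TimeSeriesPredictor

predictor = TimeSeriesPredictor(prediction_length=24)
predictor.fit(
    train_data,  # a TimeSeriesDataFrame
    hyperparameters={
        "Chronos2": {
            "revision": "main",  # placeholder: any branch name or commit hash
            "disable_known_covariates": True,  # ignore known covariates entirely
        }
    },
)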

src/autogluon/timeseries/models/chronos/model.py
@@ -186,6 +186,8 @@ class ChronosModel(AbstractTimeSeriesModel):
         Extra keyword arguments passed to ``transformers.TrainingArguments``
     keep_transformers_logs : bool, default = False
         If True, the logs generated by transformers will NOT be removed after fine-tuning
+    revision : str, default = None
+        Model revision to use (branch name or commit hash). If None, the default branch (usually "main") is used.
     """
 
     ag_priority = 55
@@ -319,6 +321,7 @@
             self.model_path,
             device_map=device,
             torch_dtype=self.torch_dtype,
+            revision=self.get_hyperparameter("revision"),
         )
 
         self._model_pipeline = pipeline
@@ -361,6 +364,7 @@
             "eval_during_fine_tune": False,
             "fine_tune_eval_max_items": 256,
             "fine_tune_shuffle_buffer_size": 10_000,
+            "revision": None,
         }
 
     @property
@@ -382,6 +386,7 @@
             "fine_tune_eval_max_items",
             "fine_tune_trainer_kwargs",
             "keep_transformers_logs",
+            "revision",
         ]
 
     def _get_fine_tune_trainer_kwargs(self, init_args, eval_during_fine_tune: bool):
@@ -466,7 +471,7 @@
         for logger_name in logging.root.manager.loggerDict:
             if "transformers" in logger_name:
                 transformers_logger = logging.getLogger(logger_name)
-                transformers_logger.setLevel(logging.ERROR if verbosity <= 3 else logging.INFO)
+                transformers_logger.setLevel(logging.ERROR if verbosity <= 3 else logging.WARNING)
 
         self._check_fit_params()
         self._log_unused_hyperparameters()
@@ -634,7 +639,7 @@
             batch_size=batch_size,
             shuffle=False,
             num_workers=num_workers,
-            on_batch=timeout_callback(seconds=time_limit),
+            after_batch=timeout_callback(seconds=time_limit),
         )
 
     def _get_context_length(self, data: TimeSeriesDataFrame) -> int:

src/autogluon/timeseries/models/chronos/utils.py
@@ -255,7 +255,7 @@ class ChronosInferenceDataset:
 
 class ChronosInferenceDataLoader(torch.utils.data.DataLoader):
     def __init__(self, *args, **kwargs):
-        self.callback: Callable = kwargs.pop("on_batch", lambda: None)
+        self.callback: Callable = kwargs.pop("after_batch", lambda: None)
         super().__init__(*args, **kwargs)
 
     def __iter__(self): # type: ignore
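
Note: the on_batch -> after_batch rename clarifies that the callback fires after each yielded batch. timeout_callback itself is not shown in this diff; a minimal sketch of what such a factory might look like, assuming it raises AutoGluon's TimeLimitExceeded once the budget is spent:

import time

from autogluon.core.utils.exceptions import TimeLimitExceeded

def timeout_callback(seconds):
    """Return a callable that raises TimeLimitExceeded once `seconds` have elapsed."""
    start = time.monotonic()

    def callback() -> None:
        if seconds is not None and time.monotonic() - start > seconds:
            raise TimeLimitExceeded

    return callback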

src/autogluon/timeseries/models/ensemble/__init__.py
@@ -6,19 +6,21 @@ from .weighted import GreedyEnsemble, PerformanceWeightedEnsemble, SimpleAverage
 
 def get_ensemble_class(name: str):
     mapping = {
-        "GreedyEnsemble": GreedyEnsemble,
-        "PerItemGreedyEnsemble": PerItemGreedyEnsemble,
-        "PerformanceWeightedEnsemble": PerformanceWeightedEnsemble,
-        "SimpleAverageEnsemble": SimpleAverageEnsemble,
-        "WeightedEnsemble": GreedyEnsemble, # old alias for this model
-        "MedianEnsemble": MedianEnsemble,
-        "TabularEnsemble": TabularEnsemble,
-        "PerQuantileTabularEnsemble": PerQuantileTabularEnsemble,
-        "LinearStackerEnsemble": LinearStackerEnsemble,
+        "Greedy": GreedyEnsemble,
+        "PerItemGreedy": PerItemGreedyEnsemble,
+        "PerformanceWeighted": PerformanceWeightedEnsemble,
+        "SimpleAverage": SimpleAverageEnsemble,
+        "Weighted": GreedyEnsemble, # old alias for this model
+        "Median": MedianEnsemble,
+        "Tabular": TabularEnsemble,
+        "PerQuantileTabular": PerQuantileTabularEnsemble,
+        "LinearStacker": LinearStackerEnsemble,
     }
-    if name not in mapping:
+
+    name_clean = name.removesuffix("Ensemble")
+    if name_clean not in mapping:
         raise ValueError(f"Unknown ensemble type: {name}. Available: {list(mapping.keys())}")
-    return mapping[name]
+    return mapping[name_clean]
 
 
 __all__ = [
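
Note: with the suffix-stripping lookup, the old and new spellings resolve to the same class:

from autogluon.timeseries.models.ensemble import GreedyEnsemble, get_ensemble_class

# "GreedyEnsemble" is normalized to "Greedy" before the lookup, so both work.
assert get_ensemble_class("Greedy") is GreedyEnsemble
assert get_ensemble_class("GreedyEnsemble") is GreedyEnsemble
# Unknown names still raise a ValueError that lists the available (suffix-free) keys.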

src/autogluon/timeseries/models/ensemble/abstract.py
@@ -11,7 +11,12 @@ logger = logging.getLogger(__name__)
 
 
 class AbstractTimeSeriesEnsembleModel(TimeSeriesModelBase, ABC):
-    """Abstract class for time series ensemble models."""
+    """Abstract base class for time series ensemble models that combine predictions from multiple base models.
+
+    Ensemble training process operates on validation predictions from base models rather than raw time series
+    data. This allows the ensemble to learn optimal combination strategies based on each model's performance
+    across different validation windows and time series patterns.
+    """
 
     @property
     @abstractmethod
@@ -49,7 +54,7 @@ class AbstractTimeSeriesEnsembleModel(TimeSeriesModelBase, ABC):
             )
             raise TimeLimitExceeded
         if isinstance(data_per_window, TimeSeriesDataFrame):
-            raise ValueError("When fitting ensemble, `data` should contain ground truth for each validation window")
+            raise ValueError("When fitting ensemble, ``data`` should contain ground truth for each validation window")
         num_val_windows = len(data_per_window)
         for model, preds in predictions_per_window.items():
             if len(preds) != num_val_windows:
@@ -69,8 +74,8 @@ class AbstractTimeSeriesEnsembleModel(TimeSeriesModelBase, ABC):
         model_scores: dict[str, float] | None = None,
         time_limit: float | None = None,
     ) -> None:
-        """Private method for `fit`. See `fit` for documentation of arguments. Apart from the model
-        training logic, `fit` additionally implements other logic such as keeping track of the time limit.
+        """Private method for ``fit``. See ``fit`` for documentation of arguments. Apart from the model
+        training logic, ``fit`` additionally implements other logic such as keeping track of the time limit.
         """
         raise NotImplementedError
 

src/autogluon/timeseries/models/ensemble/array_based/abstract.py
@@ -12,15 +12,19 @@ from .regressor import EnsembleRegressor
 
 
 class ArrayBasedTimeSeriesEnsembleModel(AbstractTimeSeriesEnsembleModel, ABC):
-    """Abstract base class for time series ensemble models which operate on arrays of base model
-    predictions for training and inference.
+    """Abstract base class for ensemble models that operate on multi-dimensional arrays of base model predictions.
+
+    Array-based ensembles convert time series predictions into structured numpy arrays for efficient processing
+    and enable sophisticated combination strategies beyond simple weighted averaging. Array-based ensembles also
+    support isotonization in quantile forecasts--ensuring quantile crossing does not occur. They also have built-in
+    failed model detection and filtering capabilities.
 
     Other Parameters
     ----------------
-    isotonization: str, default = "sort"
+    isotonization : str, default = "sort"
         The isotonization method to use (i.e. the algorithm to prevent quantile non-crossing).
         Currently only "sort" is supported.
-    detect_and_ignore_failures: bool, default = True
+    detect_and_ignore_failures : bool, default = True
         Whether to detect and ignore "failed models", defined as models which have a loss that is larger
         than 10x the median loss of all the models. This can be very important for the regression-based
         ensembles, as moving the weight from such a "failed model" to zero can require a long training
@@ -67,8 +71,8 @@ class ArrayBasedTimeSeriesEnsembleModel(AbstractTimeSeriesEnsembleModel, ABC):
         Parameters
         ----------
         df
-            TimeSeriesDataFrame to convert to an array. Must contain exactly `prediction_length`
-            values for each item. The columns of `df` can correspond to ground truth values
+            TimeSeriesDataFrame to convert to an array. Must contain exactly ``prediction_length``
+            values for each item. The columns of ``df`` can correspond to ground truth values
             or predictions (in which case, these will be the mean or quantile forecasts).
 
         Returns
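
Note: the "sort" isotonization mentioned above prevents quantile crossing by sorting the predicted quantiles along the quantile axis. A minimal numpy sketch of the idea (the array layout is an assumption; the actual ensemble code may differ):

import numpy as np

# predictions: shape (num_items, prediction_length, num_quantiles),
# with the last axis ordered by quantile level (e.g. 0.1, ..., 0.9).
predictions = np.array([[[0.3, 0.1, 0.2]]])  # a deliberately crossed toy example

# Sorting along the quantile axis yields monotonically non-decreasing
# quantile forecasts, eliminating crossings.
isotonized = np.sort(predictions, axis=-1)
print(isotonized)  # [[[0.1 0.2 0.3]]]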