autogluon.timeseries 1.4.1b20251128.tar.gz → 1.4.1b20251206.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of autogluon.timeseries might be problematic.
- {autogluon_timeseries-1.4.1b20251128/src/autogluon.timeseries.egg-info → autogluon_timeseries-1.4.1b20251206}/PKG-INFO +14 -15
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/setup.py +7 -8
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/configs/hyperparameter_presets.py +2 -2
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/dataset/ts_dataframe.py +32 -34
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/learner.py +39 -39
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/metrics/__init__.py +4 -4
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/metrics/abstract.py +8 -8
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/metrics/point.py +9 -9
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/metrics/quantile.py +4 -4
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/__init__.py +2 -1
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/abstract/abstract_timeseries_model.py +42 -38
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/abstract/tunable.py +8 -8
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/autogluon_tabular/mlforecast.py +25 -25
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/autogluon_tabular/per_step.py +12 -10
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/autogluon_tabular/transforms.py +2 -2
- autogluon_timeseries-1.4.1b20251206/src/autogluon/timeseries/models/chronos/__init__.py +4 -0
- autogluon_timeseries-1.4.1b20251206/src/autogluon/timeseries/models/chronos/chronos2.py +353 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/chronos/model.py +22 -22
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/chronos/utils.py +4 -4
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/abstract.py +4 -5
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/array_based/abstract.py +12 -12
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/array_based/regressor/abstract.py +2 -3
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/array_based/regressor/linear_stacker.py +3 -3
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/array_based/regressor/per_quantile_tabular.py +2 -3
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/array_based/regressor/tabular.py +2 -3
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/ensemble_selection.py +8 -9
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/per_item_greedy.py +6 -6
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/weighted/abstract.py +1 -2
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/weighted/basic.py +5 -5
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/weighted/greedy.py +4 -4
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/gluonts/abstract.py +23 -23
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/gluonts/dataset.py +9 -9
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/local/abstract_local_model.py +12 -12
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/local/naive.py +2 -2
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/local/npts.py +1 -1
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/local/statsforecast.py +12 -12
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/multi_window/multi_window_model.py +16 -15
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/registry.py +3 -4
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/backbone/attention.py +3 -4
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/backbone/backbone.py +6 -6
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/backbone/rope.py +4 -9
- autogluon_timeseries-1.4.1b20251206/src/autogluon/timeseries/models/toto/_internal/backbone/rotary_embedding_torch.py +342 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/backbone/scaler.py +2 -3
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/backbone/transformer.py +10 -10
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/dataset.py +2 -2
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/forecaster.py +8 -8
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/dataloader.py +4 -4
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/hf_pretrained_model.py +2 -3
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/model.py +14 -14
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/predictor.py +84 -86
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/regressor.py +17 -17
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/splitter.py +2 -2
- autogluon_timeseries-1.4.1b20251206/src/autogluon/timeseries/trainer/ensemble_composer.py +439 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/trainer/model_set_builder.py +9 -9
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/trainer/prediction_cache.py +16 -16
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/trainer/trainer.py +146 -113
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/trainer/utils.py +3 -4
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/transforms/covariate_scaler.py +7 -7
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/transforms/target_scaler.py +8 -8
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/datetime/lags.py +1 -3
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/datetime/seasonality.py +1 -3
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/features.py +9 -9
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/forecast.py +1 -2
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/timer.py +4 -5
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/version.py +1 -1
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206/src/autogluon.timeseries.egg-info}/PKG-INFO +14 -15
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon.timeseries.egg-info/SOURCES.txt +2 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon.timeseries.egg-info/requires.txt +14 -15
- autogluon_timeseries-1.4.1b20251128/src/autogluon/timeseries/models/chronos/__init__.py +0 -3
- autogluon_timeseries-1.4.1b20251128/src/autogluon/timeseries/trainer/ensemble_composer.py +0 -221
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/LICENSE +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/NOTICE +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/README.md +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/setup.cfg +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/configs/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/configs/predictor_presets.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/dataset/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/metrics/utils.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/abstract/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/abstract/model_trial.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/autogluon_tabular/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/autogluon_tabular/utils.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/array_based/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/array_based/models.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/array_based/regressor/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/ensemble/weighted/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/gluonts/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/gluonts/models.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/local/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/multi_window/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/backbone/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/backbone/distribution.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/models/toto/_internal/backbone/kvcache.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/trainer/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/transforms/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/constants.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/datetime/__init__.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/datetime/base.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/datetime/time_features.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon/timeseries/utils/warning_filters.py +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon.timeseries.egg-info/dependency_links.txt +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon.timeseries.egg-info/namespace_packages.txt +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon.timeseries.egg-info/top_level.txt +0 -0
- {autogluon_timeseries-1.4.1b20251128 → autogluon_timeseries-1.4.1b20251206}/src/autogluon.timeseries.egg-info/zip-safe +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autogluon.timeseries
-Version: 1.4.1b20251128
+Version: 1.4.1b20251206
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -39,10 +39,9 @@ Requires-Dist: joblib<1.7,>=1.2
 Requires-Dist: numpy<2.4.0,>=1.25.0
 Requires-Dist: scipy<1.17,>=1.5.4
 Requires-Dist: pandas<2.4.0,>=2.0.0
-Requires-Dist: torch<2.
-Requires-Dist: lightning<2.
-Requires-Dist:
-Requires-Dist: transformers[sentencepiece]<4.50,>=4.38.0
+Requires-Dist: torch<2.10,>=2.6
+Requires-Dist: lightning<2.6,>=2.5.1
+Requires-Dist: transformers[sentencepiece]<4.58,>=4.51.0
 Requires-Dist: accelerate<2.0,>=0.34.0
 Requires-Dist: gluonts<0.17,>=0.15.0
 Requires-Dist: networkx<4,>=3.0
@@ -53,23 +52,23 @@ Requires-Dist: coreforecast<0.0.17,>=0.0.12
 Requires-Dist: fugue>=0.9.0
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: orjson~=3.9
-Requires-Dist:
+Requires-Dist: einops<1,>=0.7
+Requires-Dist: chronos-forecasting<3,>=2.2.0rc4
+Requires-Dist: peft<0.18,>=0.13.0
 Requires-Dist: tensorboard<3,>=2.9
-Requires-Dist: autogluon.core
-Requires-Dist: autogluon.common==1.4.1b20251128
-Requires-Dist: autogluon.features==1.4.1b20251128
-Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.4.1b20251128
+Requires-Dist: autogluon.core==1.4.1b20251206
+Requires-Dist: autogluon.common==1.4.1b20251206
+Requires-Dist: autogluon.features==1.4.1b20251206
+Requires-Dist: autogluon.tabular[catboost,lightgbm,xgboost]==1.4.1b20251206
 Provides-Extra: tests
 Requires-Dist: pytest; extra == "tests"
 Requires-Dist: ruff>=0.0.285; extra == "tests"
 Requires-Dist: flaky<4,>=3.7; extra == "tests"
 Requires-Dist: pytest-timeout<3,>=2.1; extra == "tests"
-Provides-Extra: toto
-Requires-Dist:
-Requires-Dist: rotary-embedding-torch<1,>=0.8; extra == "toto"
+Provides-Extra: ray
+Requires-Dist: autogluon.core[raytune]==1.4.1b20251206; extra == "ray"
 Provides-Extra: all
-Requires-Dist:
-Requires-Dist: einops<1,>=0.7; extra == "all"
+Requires-Dist: autogluon.core[raytune]==1.4.1b20251206; extra == "all"
 Dynamic: author
 Dynamic: classifier
 Dynamic: description
setup.py
@@ -30,7 +30,6 @@ install_requires = [
     "pandas",  # version range defined in `core/_setup_utils.py`
     "torch",  # version range defined in `core/_setup_utils.py`
     "lightning",  # version range defined in `core/_setup_utils.py`
-    "pytorch_lightning",  # version range defined in `core/_setup_utils.py`
     "transformers[sentencepiece]",  # version range defined in `core/_setup_utils.py`
     "accelerate",  # version range defined in `core/_setup_utils.py`
     "gluonts>=0.15.0,<0.17",
@@ -42,10 +41,11 @@ install_requires = [
     "fugue>=0.9.0",  # prevent dependency clash with omegaconf
     "tqdm",  # version range defined in `core/_setup_utils.py`
     "orjson~=3.9",  # use faster JSON implementation in GluonTS
-    "
-
+    "einops>=0.7,<1",  # required by Chronos-2 and Toto
+    "chronos-forecasting>=2.2.0rc4,<3",
+    "peft>=0.13.0,<0.18",  # version range same as in chronos-forecasting[extras]
     "tensorboard>=2.9,<3",  # fixes https://github.com/autogluon/autogluon/issues/3612
-    f"autogluon.core
+    f"autogluon.core=={version}",
     f"autogluon.common=={version}",
     f"autogluon.features=={version}",
     f"autogluon.tabular[catboost,lightgbm,xgboost]=={version}",
@@ -58,13 +58,12 @@ extras_require = {
         "flaky>=3.7,<4",
         "pytest-timeout>=2.1,<3",
     ],
-    "
-    "
-        "rotary-embedding-torch>=0.8,<1",
+    "ray": [
+        f"autogluon.core[raytune]=={version}",
     ],
 }
 
-extras_require["all"] = list(set.union(*(set(extras_require[extra]) for extra in ["
+extras_require["all"] = list(set.union(*(set(extras_require[extra]) for extra in ["ray"])))
 install_requires = ag.get_dependency_version_ranges(install_requires)
 
 if __name__ == "__main__":
src/autogluon/timeseries/configs/hyperparameter_presets.py
@@ -1,7 +1,7 @@
-from typing import Any
+from typing import Any
 
 
-def get_hyperparameter_presets() -> dict[str, dict[str,
+def get_hyperparameter_presets() -> dict[str, dict[str, dict[str, Any] | list[dict[str, Any]]]]:
     return {
         "very_light": {
             "Naive": {},
src/autogluon/timeseries/dataset/ts_dataframe.py
@@ -7,7 +7,7 @@ import reprlib
 from collections.abc import Iterable
 from itertools import islice
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Final,
+from typing import TYPE_CHECKING, Any, Final, Type, overload
 
 import numpy as np
 import pandas as pd
@@ -122,10 +122,10 @@ class TimeSeriesDataFrame(pd.DataFrame):
 
     def __init__(
         self,
-        data:
-        static_features:
-        id_column:
-        timestamp_column:
+        data: pd.DataFrame | str | Path | Iterable,
+        static_features: pd.DataFrame | str | Path | None = None,
+        id_column: str | None = None,
+        timestamp_column: str | None = None,
         num_cpus: int = -1,
         *args,
         **kwargs,
@@ -149,7 +149,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
         else:
             raise ValueError(f"data must be a pd.DataFrame, Iterable, string or Path (received {type(data)}).")
         super().__init__(data=data, *args, **kwargs)  # type: ignore
-        self._static_features:
+        self._static_features: pd.DataFrame | None = None
         if static_features is not None:
             self.static_features = self._construct_static_features(static_features, id_column=id_column)
 
@@ -168,8 +168,8 @@ class TimeSeriesDataFrame(pd.DataFrame):
     def _construct_tsdf_from_data_frame(
         cls,
         df: pd.DataFrame,
-        id_column:
-        timestamp_column:
+        id_column: str | None = None,
+        timestamp_column: str | None = None,
     ) -> pd.DataFrame:
         df = df.copy()
         if id_column is not None:
@@ -272,9 +272,9 @@ class TimeSeriesDataFrame(pd.DataFrame):
     def from_data_frame(
         cls,
         df: pd.DataFrame,
-        id_column:
-        timestamp_column:
-        static_features_df:
+        id_column: str | None = None,
+        timestamp_column: str | None = None,
+        static_features_df: pd.DataFrame | None = None,
     ) -> TimeSeriesDataFrame:
         """Construct a ``TimeSeriesDataFrame`` from a pandas DataFrame.
 
@@ -315,10 +315,10 @@ class TimeSeriesDataFrame(pd.DataFrame):
     @classmethod
     def from_path(
         cls,
-        path:
-        id_column:
-        timestamp_column:
-        static_features_path:
+        path: str | Path,
+        id_column: str | None = None,
+        timestamp_column: str | None = None,
+        static_features_path: str | Path | None = None,
     ) -> TimeSeriesDataFrame:
         """Construct a ``TimeSeriesDataFrame`` from a CSV or Parquet file.
 
@@ -396,8 +396,8 @@ class TimeSeriesDataFrame(pd.DataFrame):
     @classmethod
     def _construct_static_features(
         cls,
-        static_features:
-        id_column:
+        static_features: pd.DataFrame | str | Path,
+        id_column: str | None = None,
     ) -> pd.DataFrame:
         if isinstance(static_features, (str, Path)):
             static_features = load_pd.load(str(static_features))
@@ -421,7 +421,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
         return self._static_features
 
     @static_features.setter
-    def static_features(self, value:
+    def static_features(self, value: pd.DataFrame | None):
         # if the current item index is not a multiindex, then we are dealing with a single
         # item slice. this should only happen when the user explicitly requests only a
         # single item or during `slice_by_timestep`. In this case we do not set static features
@@ -454,7 +454,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
 
         self._static_features = value
 
-    def infer_frequency(self, num_items:
+    def infer_frequency(self, num_items: int | None = None, raise_if_irregular: bool = False) -> str:
         """Infer the time series frequency based on the timestamps of the observations.
 
         Parameters
@@ -570,7 +570,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
         return obj
 
     def __finalize__(  # noqa
-        self: TimeSeriesDataFrame, other, method:
+        self: TimeSeriesDataFrame, other, method: str | None = None, **kwargs
     ) -> TimeSeriesDataFrame:
         super().__finalize__(other=other, method=method, **kwargs)
         # when finalizing the copy/slice operation, we use the property setter to stay consistent
@@ -602,9 +602,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
         after = TimeSeriesDataFrame(data_after, static_features=self.static_features)
         return before, after
 
-    def slice_by_timestep(
-        self, start_index: Optional[int] = None, end_index: Optional[int] = None
-    ) -> TimeSeriesDataFrame:
+    def slice_by_timestep(self, start_index: int | None = None, end_index: int | None = None) -> TimeSeriesDataFrame:
         """Select a subsequence from each time series between start (inclusive) and end (exclusive) indices.
 
         This operation is equivalent to selecting a slice ``[start_index : end_index]`` from each time series, and then
@@ -907,8 +905,8 @@ class TimeSeriesDataFrame(pd.DataFrame):
         return super().sort_index(*args, **kwargs)  # type: ignore
 
     def get_model_inputs_for_scoring(
-        self, prediction_length: int, known_covariates_names:
-    ) -> tuple[TimeSeriesDataFrame,
+        self, prediction_length: int, known_covariates_names: list[str] | None = None
+    ) -> tuple[TimeSeriesDataFrame, TimeSeriesDataFrame | None]:
         """Prepare model inputs necessary to predict the last ``prediction_length`` time steps of each time series in the dataset.
 
         Parameters
@@ -938,8 +936,8 @@ class TimeSeriesDataFrame(pd.DataFrame):
     def train_test_split(
         self,
         prediction_length: int,
-        end_index:
-        suffix:
+        end_index: int | None = None,
+        suffix: str | None = None,
     ) -> tuple[TimeSeriesDataFrame, TimeSeriesDataFrame]:
         """Generate a train/test split from the given dataset.
 
@@ -984,7 +982,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
 
     def convert_frequency(
         self,
-        freq:
+        freq: str | pd.DateOffset,
         agg_numeric: str = "mean",
         agg_categorical: str = "first",
         num_cpus: int = -1,
@@ -1003,7 +1001,7 @@ class TimeSeriesDataFrame(pd.DataFrame):
 
         Parameters
         ----------
-        freq :
+        freq : str | pd.DateOffset
            Frequency to which the data should be converted. See `pandas frequency aliases <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
            for supported values.
        agg_numeric : {"max", "min", "sum", "mean", "median", "first", "last"}, default = "mean"
@@ -1130,14 +1128,14 @@ class TimeSeriesDataFrame(pd.DataFrame):
     def reindex(*args, **kwargs) -> Self: ...  # type: ignore
 
     @overload
-    def __new__(cls, data: pd.DataFrame, static_features:
+    def __new__(cls, data: pd.DataFrame, static_features: pd.DataFrame | None = None) -> Self: ...  # type: ignore
     @overload
     def __new__(
         cls,
-        data:
-        static_features:
-        id_column:
-        timestamp_column:
+        data: pd.DataFrame | str | Path | Iterable,
+        static_features: pd.DataFrame | str | Path | None = None,
+        id_column: str | None = None,
+        timestamp_column: str | None = None,
         num_cpus: int = -1,
         *args,
         **kwargs,
src/autogluon/timeseries/learner.py
@@ -1,7 +1,7 @@
 import logging
 import reprlib
 import time
-from typing import Any, Literal,
+from typing import Any, Literal, Type
 
 import pandas as pd
 
@@ -25,12 +25,12 @@ class TimeSeriesLearner(AbstractLearner):
         self,
         path_context: str,
         target: str = "target",
-        known_covariates_names:
+        known_covariates_names: list[str] | None = None,
         trainer_type: Type[TimeSeriesTrainer] = TimeSeriesTrainer,
-        eval_metric:
+        eval_metric: str | TimeSeriesScorer | None = None,
         prediction_length: int = 1,
         cache_predictions: bool = True,
-        ensemble_model_type:
+        ensemble_model_type: Type | None = None,
         **kwargs,
     ):
         super().__init__(path_context=path_context)
@@ -41,7 +41,7 @@ class TimeSeriesLearner(AbstractLearner):
         self.prediction_length = prediction_length
         self.quantile_levels = kwargs.get("quantile_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
         self.cache_predictions = cache_predictions
-        self.freq:
+        self.freq: str | None = None
         self.ensemble_model_type = ensemble_model_type
 
         self.feature_generator = TimeSeriesFeatureGenerator(
@@ -55,14 +55,14 @@ class TimeSeriesLearner(AbstractLearner):
     def fit(
         self,
         train_data: TimeSeriesDataFrame,
-        hyperparameters:
-        val_data:
-        hyperparameter_tune_kwargs:
-        time_limit:
-        num_val_windows:
-        val_step_size:
-        refit_every_n_windows:
-        random_seed:
+        hyperparameters: str | dict,
+        val_data: TimeSeriesDataFrame | None = None,
+        hyperparameter_tune_kwargs: str | dict | None = None,
+        time_limit: float | None = None,
+        num_val_windows: int = 1,
+        val_step_size: int | None = None,
+        refit_every_n_windows: int | None = 1,
+        random_seed: int | None = None,
         **kwargs,
     ) -> None:
         self._time_limit = time_limit
@@ -95,7 +95,7 @@ class TimeSeriesLearner(AbstractLearner):
         )
 
         assert issubclass(self.trainer_type, TimeSeriesTrainer)
-        self.trainer:
+        self.trainer: TimeSeriesTrainer | None = self.trainer_type(**trainer_init_kwargs)
         self.trainer_path = self.trainer.path
         self.save()
 
@@ -122,9 +122,9 @@ class TimeSeriesLearner(AbstractLearner):
 
     def _align_covariates_with_forecast_index(
         self,
-        known_covariates:
+        known_covariates: TimeSeriesDataFrame | None,
         data: TimeSeriesDataFrame,
-    ) ->
+    ) -> TimeSeriesDataFrame | None:
         """Select the relevant item_ids and timestamps from the known_covariates dataframe.
 
         If some of the item_ids or timestamps are missing, an exception is raised.
@@ -163,10 +163,10 @@ class TimeSeriesLearner(AbstractLearner):
     def predict(
         self,
         data: TimeSeriesDataFrame,
-        known_covariates:
-        model:
+        known_covariates: TimeSeriesDataFrame | None = None,
+        model: str | AbstractTimeSeriesModel | None = None,
         use_cache: bool = True,
-        random_seed:
+        random_seed: int | None = None,
         **kwargs,
     ) -> TimeSeriesDataFrame:
         data = self.feature_generator.transform(data)
@@ -184,8 +184,8 @@ class TimeSeriesLearner(AbstractLearner):
     def score(
         self,
         data: TimeSeriesDataFrame,
-        model:
-        metric:
+        model: str | AbstractTimeSeriesModel | None = None,
+        metric: str | TimeSeriesScorer | None = None,
         use_cache: bool = True,
     ) -> float:
         data = self.feature_generator.transform(data)
@@ -194,8 +194,8 @@ class TimeSeriesLearner(AbstractLearner):
     def evaluate(
         self,
         data: TimeSeriesDataFrame,
-        model:
-        metrics:
+        model: str | None = None,
+        metrics: str | TimeSeriesScorer | list[str | TimeSeriesScorer] | None = None,
         use_cache: bool = True,
     ) -> dict[str, float]:
         data = self.feature_generator.transform(data)
@@ -203,15 +203,15 @@ class TimeSeriesLearner(AbstractLearner):
 
     def get_feature_importance(
         self,
-        data:
-        model:
-        metric:
-        features:
-        time_limit:
+        data: TimeSeriesDataFrame | None = None,
+        model: str | None = None,
+        metric: str | TimeSeriesScorer | None = None,
+        features: list[str] | None = None,
+        time_limit: float | None = None,
         method: Literal["naive", "permutation"] = "permutation",
         subsample_size: int = 50,
-        num_iterations:
-        random_seed:
+        num_iterations: int | None = None,
+        random_seed: int | None = None,
         relative_scores: bool = False,
         include_confidence_band: bool = True,
         confidence_level: float = 0.99,
@@ -272,9 +272,9 @@ class TimeSeriesLearner(AbstractLearner):
 
     def leaderboard(
         self,
-        data:
+        data: TimeSeriesDataFrame | None = None,
         extra_info: bool = False,
-        extra_metrics:
+        extra_metrics: list[str | TimeSeriesScorer] | None = None,
         use_cache: bool = True,
     ) -> pd.DataFrame:
         if data is not None:
@@ -301,7 +301,7 @@ class TimeSeriesLearner(AbstractLearner):
         return learner_info
 
     def persist_trainer(
-        self, models:
+        self, models: Literal["all", "best"] | list[str] = "all", with_ancestors: bool = False
     ) -> list[str]:
         """Loads models and trainer in memory so that they don't have to be
         loaded during predictions
@@ -332,10 +332,10 @@ class TimeSeriesLearner(AbstractLearner):
 
     def backtest_predictions(
         self,
-        data:
+        data: TimeSeriesDataFrame | None,
         model_names: list[str],
-        num_val_windows:
-        val_step_size:
+        num_val_windows: int | None = None,
+        val_step_size: int | None = None,
         use_cache: bool = True,
     ) -> dict[str, list[TimeSeriesDataFrame]]:
         if data is not None:
@@ -350,9 +350,9 @@ class TimeSeriesLearner(AbstractLearner):
 
     def backtest_targets(
         self,
-        data:
-        num_val_windows:
-        val_step_size:
+        data: TimeSeriesDataFrame | None,
+        num_val_windows: int | None = None,
+        val_step_size: int | None = None,
     ) -> list[TimeSeriesDataFrame]:
         if data is not None:
             data = self.feature_generator.transform(data)
src/autogluon/timeseries/metrics/__init__.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from pprint import pformat
-from typing import Any,
+from typing import Any, Sequence, Type
 
 import numpy as np
 
@@ -54,10 +54,10 @@ EXPERIMENTAL_METRICS: dict[str, Type[TimeSeriesScorer]] = {
 
 
 def check_get_evaluation_metric(
-    eval_metric:
+    eval_metric: str | TimeSeriesScorer | Type[TimeSeriesScorer] | None,
     prediction_length: int,
-    seasonal_period:
-    horizon_weight:
+    seasonal_period: int | None = None,
+    horizon_weight: Sequence[float] | np.ndarray | None = None,
 ) -> TimeSeriesScorer:
     """Factory method for TimeSeriesScorer objects.
 
src/autogluon/timeseries/metrics/abstract.py
@@ -1,5 +1,5 @@
 import warnings
-from typing import
+from typing import Sequence, overload
 
 import numpy as np
 import pandas as pd
@@ -52,13 +52,13 @@ class TimeSeriesScorer:
     optimum: float = 0.0
     optimized_by_median: bool = False
     needs_quantile: bool = False
-    equivalent_tabular_regression_metric:
+    equivalent_tabular_regression_metric: str | None = None
 
     def __init__(
         self,
         prediction_length: int = 1,
-        seasonal_period:
-        horizon_weight:
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
     ):
         self.prediction_length = int(prediction_length)
         if self.prediction_length < 1:
@@ -192,7 +192,7 @@ class TimeSeriesScorer:
         return self.optimum - self.score(*args, **kwargs)
 
     @staticmethod
-    def _safemean(array:
+    def _safemean(array: np.ndarray | pd.Series) -> float:
         """Compute mean of a numpy array-like object, ignoring inf, -inf and nan values."""
         return float(np.mean(array[np.isfinite(array)]))
 
@@ -240,13 +240,13 @@ class TimeSeriesScorer:
     @overload
     @staticmethod
     def check_get_horizon_weight(
-        horizon_weight:
+        horizon_weight: Sequence[float] | np.ndarray, prediction_length: int
     ) -> np.ndarray: ...
 
     @staticmethod
     def check_get_horizon_weight(
-        horizon_weight:
-    ) ->
+        horizon_weight: Sequence[float] | np.ndarray | None, prediction_length: int
+    ) -> np.ndarray | None:
         """Convert horizon_weight to a non-negative numpy array that sums up to prediction_length.
         Raises an exception if horizon_weight has an invalid shape or contains invalid values.
 
src/autogluon/timeseries/metrics/point.py
@@ -1,6 +1,6 @@
 import logging
 import warnings
-from typing import
+from typing import Sequence
 
 import numpy as np
 import pandas as pd
@@ -279,13 +279,13 @@ class MASE(TimeSeriesScorer):
     def __init__(
         self,
         prediction_length: int = 1,
-        seasonal_period:
-        horizon_weight:
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
     ):
         super().__init__(
             prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
         )
-        self._past_abs_seasonal_error:
+        self._past_abs_seasonal_error: pd.Series | None = None
 
     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
@@ -353,13 +353,13 @@ class RMSSE(TimeSeriesScorer):
     def __init__(
         self,
         prediction_length: int = 1,
-        seasonal_period:
-        horizon_weight:
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
     ):
         super().__init__(
             prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
         )
-        self._past_squared_seasonal_error:
+        self._past_squared_seasonal_error: pd.Series | None = None
 
     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
@@ -471,8 +471,8 @@ class WCD(TimeSeriesScorer):
         self,
         alpha: float = 0.5,
         prediction_length: int = 1,
-        seasonal_period:
-        horizon_weight:
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
     ):
         super().__init__(
             prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
src/autogluon/timeseries/metrics/quantile.py
@@ -1,4 +1,4 @@
-from typing import
+from typing import Sequence
 
 import numpy as np
 import pandas as pd
@@ -92,13 +92,13 @@ class SQL(TimeSeriesScorer):
     def __init__(
         self,
         prediction_length: int = 1,
-        seasonal_period:
-        horizon_weight:
+        seasonal_period: int | None = None,
+        horizon_weight: Sequence[float] | None = None,
     ):
         super().__init__(
             prediction_length=prediction_length, seasonal_period=seasonal_period, horizon_weight=horizon_weight
         )
-        self._past_abs_seasonal_error:
+        self._past_abs_seasonal_error: pd.Series | None = None
 
     def save_past_metrics(
         self, data_past: TimeSeriesDataFrame, target: str = "target", seasonal_period: int = 1, **kwargs
src/autogluon/timeseries/models/__init__.py
@@ -1,5 +1,5 @@
 from .autogluon_tabular import DirectTabularModel, PerStepTabularModel, RecursiveTabularModel
-from .chronos import ChronosModel
+from .chronos import Chronos2Model, ChronosModel
 from .gluonts import (
     DeepARModel,
     DLinearModel,
@@ -45,6 +45,7 @@ __all__ = [
     "ETSModel",
     "IMAPAModel",
     "ChronosModel",
+    "Chronos2Model",
     "ModelRegistry",
     "NPTSModel",
     "NaiveModel",