dsipts 1.1.5__tar.gz → 1.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. dsipts-1.1.6/PKG-INFO +434 -0
  2. dsipts-1.1.6/README.md +402 -0
  3. {dsipts-1.1.5 → dsipts-1.1.6}/pyproject.toml +1 -1
  4. dsipts-1.1.6/src/dsipts.egg-info/PKG-INFO +434 -0
  5. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts.egg-info/SOURCES.txt +1 -0
  6. dsipts-1.1.5/PKG-INFO +0 -31
  7. dsipts-1.1.5/src/dsipts.egg-info/PKG-INFO +0 -31
  8. {dsipts-1.1.5 → dsipts-1.1.6}/setup.cfg +0 -0
  9. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/__init__.py +0 -0
  10. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/data_management/__init__.py +0 -0
  11. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/data_management/monash.py +0 -0
  12. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/data_management/public_datasets.py +0 -0
  13. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/data_structure/__init__.py +0 -0
  14. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/data_structure/data_structure.py +0 -0
  15. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/data_structure/modifiers.py +0 -0
  16. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/data_structure/utils.py +0 -0
  17. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/Autoformer.py +0 -0
  18. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/CrossFormer.py +0 -0
  19. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/D3VAE.py +0 -0
  20. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/Diffusion.py +0 -0
  21. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/DilatedConv.py +0 -0
  22. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/DilatedConvED.py +0 -0
  23. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/Duet.py +0 -0
  24. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/ITransformer.py +0 -0
  25. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/Informer.py +0 -0
  26. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/LinearTS.py +0 -0
  27. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/PatchTST.py +0 -0
  28. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/Persistent.py +0 -0
  29. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/RNN.py +0 -0
  30. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/Samformer.py +0 -0
  31. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/TFT.py +0 -0
  32. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/TIDE.py +0 -0
  33. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/TTM.py +0 -0
  34. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/TimeXER.py +0 -0
  35. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/VQVAEA.py +0 -0
  36. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/VVA.py +0 -0
  37. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/__init__.py +0 -0
  38. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/autoformer/__init__.py +0 -0
  39. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/autoformer/layers.py +0 -0
  40. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/base.py +0 -0
  41. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/base_v2.py +0 -0
  42. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/crossformer/__init__.py +0 -0
  43. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/crossformer/attn.py +0 -0
  44. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/crossformer/cross_decoder.py +0 -0
  45. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/crossformer/cross_embed.py +0 -0
  46. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/crossformer/cross_encoder.py +0 -0
  47. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/d3vae/__init__.py +0 -0
  48. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/d3vae/diffusion_process.py +0 -0
  49. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/d3vae/embedding.py +0 -0
  50. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/d3vae/encoder.py +0 -0
  51. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/d3vae/model.py +0 -0
  52. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/d3vae/neural_operations.py +0 -0
  53. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/d3vae/resnet.py +0 -0
  54. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/d3vae/utils.py +0 -0
  55. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/duet/__init__.py +0 -0
  56. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/duet/layers.py +0 -0
  57. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/duet/masked.py +0 -0
  58. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/informer/__init__.py +0 -0
  59. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/informer/attn.py +0 -0
  60. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/informer/decoder.py +0 -0
  61. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/informer/embed.py +0 -0
  62. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/informer/encoder.py +0 -0
  63. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/itransformer/Embed.py +0 -0
  64. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/itransformer/SelfAttention_Family.py +0 -0
  65. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/itransformer/Transformer_EncDec.py +0 -0
  66. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/itransformer/__init__.py +0 -0
  67. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/patchtst/__init__.py +0 -0
  68. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/patchtst/layers.py +0 -0
  69. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/samformer/__init__.py +0 -0
  70. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/samformer/utils.py +0 -0
  71. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/tft/__init__.py +0 -0
  72. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/tft/sub_nn.py +0 -0
  73. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/timexer/Layers.py +0 -0
  74. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/timexer/__init__.py +0 -0
  75. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/ttm/__init__.py +0 -0
  76. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/ttm/configuration_tinytimemixer.py +0 -0
  77. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/ttm/consts.py +0 -0
  78. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/ttm/modeling_tinytimemixer.py +0 -0
  79. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/ttm/utils.py +0 -0
  80. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/utils.py +0 -0
  81. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/vva/__init__.py +0 -0
  82. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/vva/minigpt.py +0 -0
  83. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/vva/vqvae.py +0 -0
  84. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/xlstm/__init__.py +0 -0
  85. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts/models/xlstm/xLSTM.py +0 -0
  86. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts.egg-info/dependency_links.txt +0 -0
  87. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts.egg-info/requires.txt +0 -0
  88. {dsipts-1.1.5 → dsipts-1.1.6}/src/dsipts.egg-info/top_level.txt +0 -0
dsipts-1.1.6/PKG-INFO ADDED
@@ -0,0 +1,434 @@
Metadata-Version: 2.4
Name: dsipts
Version: 1.1.6
Summary: Unified library for timeseries modelling
Author-email: Andrea Gobbi <agobbi@fbk.eu>
Project-URL: Homepage, https://github.com/DSIP-FBK/DSIPTS
Project-URL: Docs, https://dsip-fbk.github.io/DSIPTS/
Requires-Python: ==3.11.13
Description-Content-Type: text/markdown
Requires-Dist: aim==3.20.1
Requires-Dist: beautifulsoup4==4.12.0
Requires-Dist: einops>=0.8.1
Requires-Dist: html-table-parser-python3==0.3.1
Requires-Dist: html5lib>=1.1
Requires-Dist: hydra-core>=1.3.2
Requires-Dist: hydra-joblib-launcher>=1.2.0
Requires-Dist: hydra-optuna-sweeper>=1.2.0
Requires-Dist: hydra-submitit-launcher>=1.2.0
Requires-Dist: ipykernel>=6.30.1
Requires-Dist: lightning>=2.5.4
Requires-Dist: matplotlib>=3.10.6
Requires-Dist: nbformat>=5.10.4
Requires-Dist: numba>=0.61.2
Requires-Dist: numpy<2.0.0
Requires-Dist: pandas>=2.3.2
Requires-Dist: plotly>=6.3.0
Requires-Dist: scikit-learn>=1.7.1
Requires-Dist: sphinx>=8.2.3
Requires-Dist: sphinx-mdinclude>=0.6.2
Requires-Dist: sphinx-pdj-theme>=0.7.3
Requires-Dist: transformers>=4.56.0

# DSIPTS: unified library for timeseries modelling

> [!CAUTION]
> The documentation, README and notebooks are somewhat outdated and some architectures are under review, so please be patient. There will also be frequent changes due to refactoring and documentation updates. Wait for version 1.2.0 for a more stable library (in terms of structure and documentation), or even 2.0.0 for tests, assertions and other standard stuff.

This library allows you to:

- load timeseries in a convenient format
- create toy timeseries with controlled categorical features
- load public timeseries
- train a predictive model using different PyTorch architectures
- define more complex structures using Modifiers (e.g. combining unsupervised learning + deep learning)

## Disclaimer
The original repository is located [here](https://gitlab.fbk.eu/dsip/dsip_dlresearch/timeseries), but there is a push mirror on GitHub that you can find [here](https://github.com/DSIP-FBK/DSIPTS/). Depending on the evolution of the library we will decide whether to keep both or move definitively to GitHub.

## Background

Let $X(t)$ be a multivariate timeseries, i.e. $\forall t,\ X(t)\in \mathbf{R}^k$ for some $k$. The vector space $\mathbf{R}^k$ can be partitioned into two disjoint sets: the categorical features $\mathcal{C}\subset \mathbf{N}^c$ and the continuous features $\mathcal{W}\subset \mathbf{R}^{k-c}$. We assume that $\mathcal{C}$ is known for each $t$. Let $\mathcal{F}\subset\mathbf{R}^{f}$ be the set of variables known for each $t$, $\mathcal{P}\subset\mathbf{R}^{p}$ the set of variables known only up to time $t$, and $\mathcal{T}\subset\mathcal{P}\subset\mathbf{R}^{s}$ the target variables. Let us also define $\tau\in\mathbf{N}$ as the number of lags for which we want a forecast; the aim of a predictive model is then to find a function $F:\mathbf{R}^k\rightarrow\mathbf{R}^{s \times \tau}$ such that:

$$
F(\mathcal{C}(t-K,\ldots,t+\tau),\mathcal{F}(t-K,\ldots,t+\tau),\mathcal{P}(t-K,\ldots,t),\mathcal{T}(t-K,\ldots,t) ) = \mathcal{T}(t+1,\ldots,t+\tau)
$$

for some $K$ representing the maximum past context.

In the library we adopt some conventions that must be followed when developing a new model:
```
y : the target variable(s)
x_num_past: the numerical past variables
x_num_future: the numerical future variables
x_cat_past: the categorical past variables
x_cat_future: the categorical future variables
idx_target: indexes of the y variables inside the past dataset. Can be used during training to train a differential model
```
By default, during the dataset construction, the target variables are added to the `x_num_past` list. Moreover, the set of categorical variables can differ between past and future, but we chose to keep the two parts distinct during the forward loop for the sake of generality.

During the forward process, the batch is a dictionary with some of the keys shown above; remember that not all keys are always present (please check) and build your model accordingly. The shape of each tensor is $[B,L,C]$, where $B$ indicates the batch size, $L$ the length and $C$ the number of channels.

The output of a new model must have shape $[B,L,C,1]$ for a single prediction, or $[B,L,C,3]$ if you are using a quantile loss with three quantiles.

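To make the convention concrete, here is a minimal sketch (not the library's actual base class) of a forward pass consuming such a batch; the key names follow the convention above, while the GRU encoder and the layer sizes are purely illustrative:

```python
import torch
import torch.nn as nn

class ToyForecaster(nn.Module):
    """Illustrative only: shows how a model consumes the batch dictionary."""

    def __init__(self, past_channels: int, out_channels: int, future_steps: int, hidden: int = 64):
        super().__init__()
        self.future_steps = future_steps
        self.out_channels = out_channels
        self.encoder = nn.GRU(past_channels, hidden, batch_first=True)
        # one value per target channel and per future step (no quantiles -> last dim is 1)
        self.head = nn.Linear(hidden, future_steps * out_channels)

    def forward(self, batch: dict) -> torch.Tensor:
        x = batch['x_num_past']          # [B, past_steps, past_channels]
        _, h = self.encoder(x)           # h: [num_layers, B, hidden]
        out = self.head(h[-1])           # [B, future_steps * out_channels]
        # reshape to the required output convention [B, L, C, 1]
        return out.reshape(-1, self.future_steps, self.out_channels, 1)
```
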
Try to reuse some of the common keywords while building your model. After the initialization of the model you can use whatever variables you want, but during the initialization please follow the conventions below (an illustrative constructor signature follows these lists).
This first block is likely common to several architectures:

---

- **past_steps** = int. THIS IS CRUCIAL and self explanatory
- **future_steps** = int. THIS IS CRUCIAL and self explanatory
- **past_channels** = len(ts.num_var). THIS IS CRUCIAL and self explanatory
- **future_channels** = len(ts.future_variables). THIS IS CRUCIAL and self explanatory
- **out_channels** = len(ts.target_variables). THIS IS CRUCIAL and self explanatory
- **embs_past** = [ts.dataset[c].nunique() for c in ts.cat_past_var]. THIS IS CRUCIAL and self explanatory.
- **embs_fut** = [ts.dataset[c].nunique() for c in ts.cat_fut_var]. THIS IS CRUCIAL and self explanatory.
- **use_classical_positional_encoder** = if True, classical positional encoding is computed with a combination of sin/cos/exponential functions; otherwise the positional encoding is done with `nn.Embedding` like the other categorical variables
- **reduction_mode** = how the categorical metafeatures are combined: summed, averaged or stacked, depending on the behavior you prefer.
- **emb_dim** = int. Dimension of the embedded categorical variables; the choice here is to use a constant value and let the user choose whether to concatenate or sum the variables
- **quantiles** = [0.1,0.5,0.9]. Quantiles for the quantile loss
- **kind** = str. If there are similar architectures with small differences, it may be better to use the same code and specify some properties (e.g. GRU vs LSTM)
- **activation** = str ('torch.nn.ReLU' default). Activation function between layers (see [pytorch activation functions](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity))
- **optim** = str ('torch.optim.Adam' default). Optimization function (see [pytorch optimization functions](https://pytorch.org/docs/stable/optim.html))
- **dropout_rate** = float. Dropout rate
- **use_bn** = boolean. Whether to use batch normalization
- **persistence_weight** = float. Penalization weight for persistent predictions
- **loss_type** = str. There are some other metrics implemented, see the [metric section](#metrics) for details


---
Some are more specific to RNN/convolutional architectures:

---
- **hidden_RNN** = int. If there are RNN layers, use this and the following
- **num_layers_RNN** = int.
- **kernel_size** = int. If there are convolutional layers

---

linear models:

- **hidden_size** = int. Usually the hidden dimension; for some architectures you may pass a list of dimensions
- **kind** = str. Type of linear approach

---

or attention based models:

- **d_model** = int. d_model of a typical attention layer
- **n_heads** = int. Number of heads
- **dropout_rate** = float. Dropout
- **n_layer_encoder** = int. Number of encoder layers
- **n_layer_decoder** = int. Number of decoder layers
---

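As an illustration of these conventions, a new model's constructor might look like the sketch below; the signature is hypothetical and not the exact one used by the library's models:

```python
from typing import List, Optional

class MyAttentionModel:  # in the library this would extend the Base class
    """Hypothetical constructor showing the conventional parameter names."""

    def __init__(self,
                 past_steps: int,
                 future_steps: int,
                 past_channels: int,
                 future_channels: int,
                 out_channels: int,
                 embs_past: List[int],
                 embs_fut: List[int],
                 emb_dim: int = 16,
                 d_model: int = 64,
                 n_heads: int = 4,
                 n_layer_encoder: int = 2,
                 n_layer_decoder: int = 2,
                 dropout_rate: float = 0.1,
                 quantiles: Optional[List[float]] = None,  # e.g. [0.1, 0.5, 0.9]
                 activation: str = 'torch.nn.ReLU',
                 optim: str = 'torch.optim.Adam',
                 persistence_weight: float = 0.0,
                 loss_type: str = 'l1'):
        ...
```
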
## Install
Clone the repo (GitLab or GitHub).
The library is structured to work with [uv](https://github.com/astral-sh/uv). After installing `uv`, just run
```
uv pip install .
```
The pip package wi


## For developers
- Remember to update the `pyproject.toml`
- use `uv add` and `uv sync` to update the project
- `uv pip install -e .` to install dsipts
- `uv build` to build it
- `uv pip install dist/dsipts-X.Y.Z-py3-none-any.whl` to check the installation
- generate the documentation with `uv run sphinx-quickstart docs` (just the first time)
- `uv run sphinx-apidoc -o docs/source src/dsipts`
- `uv run sphinx-build -b html docs/source ../docs`

## AIM
DSIPTS uses AIM for tracking losses, parameters and other useful information. The first time you use DSIPTS you may need to initialize aim by executing:
```bash
aim init
```

## Usage

Let's make an example with the public weather data (you can find it [here](https://drive.google.com/drive/folders/13Cg1KYOlzM5C7K8gK8NfC-F3EYxkM3D2) or [here](https://github.com/thuml/Time-Series-Library?tab=readme-ov-file)):

```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from dsipts import TimeSeries, RNN, read_public_dataset

# PATH_TO_DATA is the folder where you downloaded the public datasets
data, columns = read_public_dataset(PATH_TO_DATA, 'weather')
```
171
+ define then how to use the information and define the time series. You can add automatically the `hour` categorical data using the key `enrich_cat` that will be automatically added to the categorical past and categorical future list of columns:
172
+ ```pyhon
173
+ use_covariates = False #use only y in the PAST
174
+ use_future_covariate = True #suppose to have some future covariates
175
+ ts = TimeSeries('weather')
176
+ ts.load_signal( data,enrich_cat=['hour'],target_variables=['y'],past_variables=columns if use_covariates else [], future_variables=columns if use_future_covariate else [] )
177
+ fig = ts.plot() # plot the target variable(s )
178
+ ```
The most important part is the method `ts.load_signal`, where the user can specify the parameters of the timeseries, such as:

- **data** (pd.DataFrame) – input dataset; the column indicating the time must be called `time`

- **enrich_cat** (List[str], optional) – lets this function enrich the dataset, for example adding the standard columns: hour, dow, month and minute. Defaults to [].

- **past_variables** (List[str], optional) – list of column names of past variables not available at future times. Defaults to [].

- **future_variables** (List[str], optional) – list of future variables available at future times. Defaults to [].

- **target_variables** (List[str], optional) – list of the target variables. They will be added to past_variables by default unless check_past is False. Defaults to [].

- **cat_past_var** (List[str], optional) – list of the past categorical variables. Defaults to [].

- **cat_future_var** (List[str], optional) – list of the future categorical variables. Defaults to [].

- **check_past** (bool, optional) – see target_variables. Defaults to True.

- **group** (str or None, optional) – if not None, the dataset is considered to be composed of homogeneous timeseries coming from different realizations (for example points of sale, cities, locations), and the relative series are not split during sample generation. Defaults to None (see the sketch after this list).

- **check_holes_and_duplicates** (bool, optional) – if False, duplicates and holes are not checked and the dataloader may not work correctly; disable at your own risk. Defaults to True.

- **silly_model** (bool, optional) – if True, the target variables are added to the pool of the future variables. This can be useful to check whether information passes through the decoder part of your model (if any).

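The `group` parameter deserves a quick example. A minimal sketch, assuming a stacked dataframe with one series per shop; the `shop` column and the synthetic data are invented for illustration:

```python
import numpy as np
import pandas as pd
from dsipts import TimeSeries

# Hypothetical dataset: one homogeneous series per shop, stacked together.
rng = pd.date_range('2021-01-01', periods=500, freq='h')
frames = [pd.DataFrame({'time': rng,
                        'y': np.random.randn(len(rng)).cumsum(),
                        'shop': shop}) for shop in ['A', 'B']]
data = pd.concat(frames, ignore_index=True)

ts = TimeSeries('sales')
# group='shop' marks the rows as distinct realizations, so generated
# samples never cross the boundary between two shops.
ts.load_signal(data, target_variables=['y'], enrich_cat=['dow'], group='shop')
```
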
206
+ Now we can define a forecasting problem (`past_steps` as context, `future_steps` as future horizon )
207
+
208
+ Let suppose to use a RNN encoder-decoder structure, then the model has the following parameters:
209
+ ```python
210
+ past_steps = 12*7
211
+ future_steps = 12
212
+ config = dict(model_configs =dict(
213
+
214
+ past_steps = past_steps, #TASK DEPENDENT
215
+ future_steps = future_steps,#TASK DEPENDENT
216
+
217
+ emb_dim = 16, # categorical stuff
218
+ use_classical_positional_encoder = True, # categorical stuff
219
+ reduction_mode = 'mean',# categorical stuff
220
+
221
+ kind = 'gru',# model dependent
222
+ hidden_RNN = 12,# model dependent
223
+ num_layers_RNN = 2,# model dependent
224
+ kernel_size = 15,# model dependent
225
+ dropout_rate= 0.5,# model dependent
226
+ remove_last= True,# model dependent
227
+ use_bn = False,# model dependent
228
+ activation= 'torch.nn.PReLU', # model dependent
229
+
230
+ quantiles=[0.1,0.5,0.9], #LOSS
231
+ persistence_weight= 0.010, #LOSS
232
+ loss_type= 'l1', #LOSS
233
+
234
+ optim= 'torch.optim.Adam', #OPTIMIZER
235
+
236
+ past_channels = len(ts.past_variables), #parameter that depends on the ts dataset
237
+ future_channels = len(ts.future_variables), #parameter that depends on the ts dataset
238
+ embs_past = [ts.dataset[c].nunique() for c in ts.cat_past_var], #parameter that depends on the ts dataset
239
+ embs_fut = [ts.dataset[c].nunique() for c in ts.cat_fut_var], #parameter that depends on the ts dataset
240
+ out_channels = len(ts.target_variables)), #parameter that depends on the ts dataset
241
+
242
+ scheduler_config = dict(gamma=0.1,step_size=100),
243
+ optim_config = dict(lr = 0.0005,weight_decay=0.01))
244
+ model_rnn = RNN(**config['model_configs'],optim_config = config['optim_config'],scheduler_config =config['scheduler_config'],verbose=False )
245
+
246
+ ts.set_model(model_rnn,config=config )
247
+
248
+ ```

Now we are ready to split and train our model. First define the splitting configuration:
```python
split_params = {'perc_train': 0.7, 'perc_valid': 0.1,  ## if not None it will split 70% / 10% / 20%
                'range_train': None, 'range_validation': None, 'range_test': None,  ## or split using ranges, e.g. range_train=['2021-02-03','2022-04-08']
                'past_steps': past_steps,
                'future_steps': future_steps,
                'starting_point': None,  ## do not skip samples
                'skip_step': 10  ## distance between two consecutive samples, aka the stride (the larger it is, the fewer points we have in train)
                }

ts.train_model(dirpath=PATH_TO_SAVING_STUFF,
               split_params=split_params,
               batch_size=128,
               num_workers=4,
               max_epochs=2,
               gradient_clip_val=0.0,
               gradient_clip_algorithm='value',
               precision='bf16',
               auto_lr_find=True)

ts.losses.plot()
ts.save("weather")  ## save all the metadata so we can use it later in inference mode
```

It is possible to split the data by indicating the percentage of data to use in train, validation and test, or by indicating ranges. The `shift` parameter indicates whether there is a shift when constructing the y array. It can be used for some attention models where we need to know the first value of the timeseries to predict; it may disappear in the future because it is misleading. The `skip_step` parameter indicates how many temporal steps there are between samples. If you need a future signal that is `skip_step+future_steps` long, then you should set `keep_entire_seq_while_shifting` to True (see the Informer model).

During the training phase a log stream will be generated. If a single process is spawned the log will be displayed, otherwise a file will be generated. Moreover, inside the `weight` path there will be the `loss.csv` file containing the running losses.

At the end of the training process it is possible to load the model by passing the model class (`RNN`) and the saving name used before (`weather`).
If the same model and the same name are used when defining the time series, the training procedure will continue from the last checkpoint. Due to Lightning-related behavior, the epoch count will start from the last stage (if you trained for 10 epochs and you want to train for 10 more, you need to set max_epochs to 20).
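For example, resuming the training above for 10 more epochs could look like this sketch (same arguments as the earlier `train_model` call; remember that `max_epochs` is the new total, not the increment):

```python
# Resume from the last checkpoint: same model class, same saving name.
ts.load(RNN, "weather", load_last=True)

# Already trained for 10 epochs and want 10 more? Set max_epochs to 20.
ts.train_model(dirpath=PATH_TO_SAVING_STUFF,
               split_params=split_params,
               batch_size=128,
               max_epochs=20)
```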

```python
ts.load(RNN, "weather", load_last=True)
res = ts.inference_on_set(200, 4, set='test', rescaling=True)
error = res.groupby('lag').apply(lambda x: np.nanmean((x.y-x.y_median)**2)).reset_index().rename(columns={0:'error'})
```
If a quantile loss has been selected, the model generates three signals (`_low, _median, _high`); if not, the output of the model is indicated with `_pred`. `lag` indicates which step the prediction refers to (e.g. lag=1 is the first output of the model along the output sequence).

```python
import matplotlib.pyplot as plt
mask = res.prediction_time=='2020-10-19 19:50:00'
plt.plot(res.lag[mask], res.y[mask], label='real')
plt.plot(res.lag[mask], res.y_median[mask], label='median')
plt.legend()
```
Another useful plot is the error per lag, where it is possible to observe how the error grows with the lag time:

```python
import numpy as np
res['error'] = np.abs(res['y']-res['y_median'])
res.groupby('lag').error.mean().plot()
```

This example can be found [here](/notebooks/public_timeseries.ipynb).

# Categorical variables
Most of the implemented models can deal with categorical variables (`cat_past_var` and `cat_fut_var`). In particular, there are some variables that you don't need to compute yourself: when declaring a `ts` object you can pass the parameter `enrich_cat=['dow']`, which will add the day of the week to the dataframe (and to the dataloader). Currently you can automatically add `hour`, `dow`, `month` and `minute`. If there are other categorical variables, please add them to the lists while loading your data, as in the sketch below.
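A minimal sketch, assuming `data` has a datetime `time` column; the `is_weekend` feature is invented for illustration:

```python
# Hypothetical custom categorical feature: weekend indicator.
data['is_weekend'] = (data['time'].dt.dayofweek >= 5).astype(int)

ts = TimeSeries('weather')
ts.load_signal(data,
               enrich_cat=['hour', 'dow'],     # standard automatic enrichments
               target_variables=['y'],
               cat_past_var=['is_weekend'],    # known in the past...
               cat_future_var=['is_weekend'])  # ...and in the future
```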

# Models
A description of each model can be found in the class documentation [here](https://dsip.pages.fbk.eu/dsip_dlresearch/timeseries/).
It is possible to use one of the following architectures:

- **RNN** (GRU, LSTM or xLSTM) models; [xLSTM](https://arxiv.org/pdf/2405.04517) is taken from the [official repo](https://github.com/muditbhargava66/PyxLSTM)
- **Linear** models based on the [official repository](https://github.com/cure-lab/LTSF-Linear), [paper](https://arxiv.org/pdf/2205.13504.pdf). An alternative model (alinear) has been implemented that drops the autoregressive part and uses only covariates
- **Crossformer** [official repository](https://github.com/cheerss/CrossFormer), [paper](https://openreview.net/forum?id=vSVLM2j9eie)
- **Informer** [official repository](https://github.com/zhouhaoyi/Informer2020), [paper](https://arxiv.org/abs/2012.07436)
- **Autoformer** [non-official repository](https://github.com/yuqinie98/PatchTST/tree/main), [paper](https://arxiv.org/abs/2106.13008)
- **PatchTST** [official repository](https://github.com/yuqinie98/PatchTST/tree/main), [paper](https://arxiv.org/abs/2211.14730)
- **Persistent** baseline model
- **TFT** [paper](https://arxiv.org/abs/1912.09363)
- **DilatedConv** dilated convolutional RNN: the transfer of knowledge between past and future is performed by reusing the final hidden state of the encoder RNN as the initial hidden state of the decoder.
- **DilatedConvED** dilated convolutional RNN with an encoder/decoder structure.
- **ITransformer** [paper](https://arxiv.org/abs/2310.06625), [official repo](https://github.com/thuml/iTransformer)
- **TIDE** [paper](https://arxiv.org/abs/2304.08424)
- **Samformer** [paper](https://arxiv.org/pdf/2402.10198), [official repo](https://github.com/romilbert/samformer/tree/main?tab=MIT-1-ov-)
- **Duet** [paper](https://arxiv.org/abs/2412.10859), [official repo](https://github.com/decisionintelligence/DUET)

These models are under review because they are broken or not aligned with the recent distinction between past and future categorical data:

- **Diffusion** custom [diffusion process](https://arxiv.org/abs/2102.09672) using the attention mechanism in the subnets.
- **D3VAE** adaptation of the [official repository](https://github.com/PaddlePaddle/PaddleSpatial), [paper](https://arxiv.org/abs/2301.03028)
- **VQVAE** adaptation of [vqvae for images](https://nbviewer.org/github/zalandoresearch/pytorch-vq-vae/blob/master/vq-vae.ipynb) described in this [paper](https://arxiv.org/abs/1711.00937), paired with a [GPT](https://github.com/karpathy/minGPT) transformer.
- **VVA** like VQVAE, but the tokenization step is performed using a standard clustering procedure.

## Metrics
In some cases the persistence model is hard to beat, and even more complex models can fall into the persistence trap, simply propagating the last seen values.
For this reason a set of losses can be used to try to keep the model out of the trap. In particular we implemented: MSE, L1, sinkhorn divergence, dilated loss, quantile loss, MDA and a couple of experimental losses for minimizing the variance or penalizing persistency. See the base model definition in `dsipts/models/base.py` for more details.
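Switching loss is done through the model configuration shown in the Usage section; the exact strings accepted by `loss_type` are defined in `dsipts/models/base.py`, so treat the values below as an assumption to verify there:

```python
# Hypothetical: pick an anti-persistence setup instead of plain L1.
config['model_configs'].update(
    loss_type='dilated',       # one of the implemented losses (check base.py)
    persistence_weight=0.05,   # extra penalty against persistent predictions
)
```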

# Bash experiment
Most of the time you want to train the models in a cluster with a GPU, and a command-line training procedure can help speed up the process. DSIPTS leverages OmegaConf/Hydra to do this, and in the folder `bash_examples` you can find some examples. Please read the documentation [here](/bash_examples/README.md).

# Modifiers

The VVA model is composed of two steps: the first is a clustering procedure that divides the input time series into smaller segments and associates a label with each segment. At this point the GPT model works on the sequence of labels, trying to predict the next cluster id. Using the centroids of the clusters (and the variance), the final output is reconstructed. This pipeline is quite unusual and does not fit the automation pipeline, but it is possible to use a `Modifier`, an abstract class that has 3 methods (a minimal sketch follows the list):
- **fit_transform**: called before starting the training process; returns the train/validation pytorch datasets. In the aforementioned model, this is where the clustering model is trained.
- **transform**: used during the inference phase. It is similar to fit_transform but without the training process
- **inverse_transform**: the outputs of the model are reverted to the original shape. In the VVA model the centroids are used to reconstruct the predicted timeseries.
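A minimal sketch of a custom modifier; the three method names follow the description above, while the signatures and the no-op behavior are assumptions (see `dsipts/data_structure/modifiers.py` for the real interface):

```python
class IdentityModifier:
    """Hypothetical no-op modifier illustrating the three-method contract."""

    def fit_transform(self, train_dataset, validation_dataset):
        # Fit any auxiliary model here (e.g. the clustering step of VVA),
        # then return the datasets the trainer should consume.
        return train_dataset, validation_dataset

    def transform(self, dataset):
        # Same preprocessing as fit_transform, but without fitting anything.
        return dataset

    def inverse_transform(self, predictions):
        # Map the model outputs back to the original shape/scale.
        return predictions
```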

## Documentation
You can find the documentation [here](https://dsip.pages.fbk.eu/dsip_dlresearch/timeseries/)
or in the folder `docs/_build/html/index.html`.
If you need to regenerate the documentation after some modification, just run:
```
./make_doc.sh
```

For users only: be sure that the CI file has pages enabled, see [public pages](https://roneo.org/en/gitlab-public-pages-private-repo/).

# Adding new models
If you want to add a model (see the skeleton after the list):

- extend the `Base` class in `dsipts/models`
- add the export line in the `dsipts/__init__.py`
- add a full configuration file in `bash_examples/config_test/architecture`
- optional: add in `bash_script/utils.py` the section to initialize and load the new model
- add the modifier in `dsipts/data_structure/modifiers.py` if it is required
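A skeleton for the first two steps, as a sketch only; the `Base` constructor arguments and required hooks are not shown here, so mirror an existing model such as `RNN.py` for the real contract:

```python
# src/dsipts/models/MyModel.py
from .base import Base

class MyModel(Base):
    """Sketch of a new architecture; copy an existing model for the details."""

    def __init__(self, past_steps: int, future_steps: int, out_channels: int, **kwargs):
        super().__init__(**kwargs)  # assumption: Base handles loss/optimizer setup
        self.past_steps = past_steps
        self.future_steps = future_steps
        self.out_channels = out_channels

    def forward(self, batch: dict):
        # Consume the conventional batch keys (x_num_past, x_cat_past, ...) and
        # return a tensor shaped [B, future_steps, out_channels, 1 or 3].
        raise NotImplementedError

# and in src/dsipts/__init__.py:
# from .models.MyModel import MyModel
```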

# Testing
See [here](/bash_examples/README.md) for the testing session.

# Logging
From version 1.1.0, Aim is used for logging all the experiments and metrics. It is quite easy to install and use. Just go inside the main folder (`bash_examples`) and run:
```
aim init #only the first time
aim up
```
and then open the url [http://127.0.0.1:43800](http://127.0.0.1:43800). It will show the model parameters, some metrics and the losses during the training procedure
![plot](bash_examples/figures/aim1.png)
but also some predictions (the first sample of the first batch of the validation set, every 10% of the maximum number of epochs):
![plot](bash_examples/figures/aim2.png)


## TODO
- [ ] reduce test time
- [ ] add pre-commit hook for code checking (`ruff check --ignore E501,E722 .`)
- [ ] add pre-commit hook testing
- [ ] clean code and standardize documentation
- [ ] add more synthetic data
- [ ] check all the code in the README
- [ ] check the architecture descriptions (which model can be used under which assumptions)
- [ ] complete the classification part (loss function + inference step)
- [ ] check D3VAE, it seems broken in some configurations
- [ ] add hybrid models https://www.sciencedirect.com/science/article/pii/S138912862400118X
- [ ] add SOFTS https://github.com/Secilia-Cxy/SOFTS/blob/main/models/SOFTS.py
- [ ] add https://github.com/Hank0626/PDF/blob/main/models/PDF.py
- [ ] add https://github.com/decisionintelligence/pathformer
- [x] add Duet
- [x] add categorical support to Crossformer, Samformer
- [ ] in 1.1.5 we split the future and past categorical variables. D3VAE, Diffusion, TTM need to be revised
- [ ] all snippets of code and notebooks must be reviewed for 1.1.5 (categorical past and future, embedding layer parameters)