dsipts-1.1.5-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.

Potentially problematic release.


This version of dsipts might be problematic.

Files changed (81)
  1. dsipts/__init__.py +48 -0
  2. dsipts/data_management/__init__.py +0 -0
  3. dsipts/data_management/monash.py +338 -0
  4. dsipts/data_management/public_datasets.py +162 -0
  5. dsipts/data_structure/__init__.py +0 -0
  6. dsipts/data_structure/data_structure.py +1167 -0
  7. dsipts/data_structure/modifiers.py +213 -0
  8. dsipts/data_structure/utils.py +173 -0
  9. dsipts/models/Autoformer.py +199 -0
  10. dsipts/models/CrossFormer.py +152 -0
  11. dsipts/models/D3VAE.py +196 -0
  12. dsipts/models/Diffusion.py +818 -0
  13. dsipts/models/DilatedConv.py +342 -0
  14. dsipts/models/DilatedConvED.py +310 -0
  15. dsipts/models/Duet.py +197 -0
  16. dsipts/models/ITransformer.py +167 -0
  17. dsipts/models/Informer.py +180 -0
  18. dsipts/models/LinearTS.py +222 -0
  19. dsipts/models/PatchTST.py +181 -0
  20. dsipts/models/Persistent.py +44 -0
  21. dsipts/models/RNN.py +213 -0
  22. dsipts/models/Samformer.py +139 -0
  23. dsipts/models/TFT.py +269 -0
  24. dsipts/models/TIDE.py +296 -0
  25. dsipts/models/TTM.py +252 -0
  26. dsipts/models/TimeXER.py +184 -0
  27. dsipts/models/VQVAEA.py +299 -0
  28. dsipts/models/VVA.py +247 -0
  29. dsipts/models/__init__.py +0 -0
  30. dsipts/models/autoformer/__init__.py +0 -0
  31. dsipts/models/autoformer/layers.py +352 -0
  32. dsipts/models/base.py +439 -0
  33. dsipts/models/base_v2.py +444 -0
  34. dsipts/models/crossformer/__init__.py +0 -0
  35. dsipts/models/crossformer/attn.py +118 -0
  36. dsipts/models/crossformer/cross_decoder.py +77 -0
  37. dsipts/models/crossformer/cross_embed.py +18 -0
  38. dsipts/models/crossformer/cross_encoder.py +99 -0
  39. dsipts/models/d3vae/__init__.py +0 -0
  40. dsipts/models/d3vae/diffusion_process.py +169 -0
  41. dsipts/models/d3vae/embedding.py +108 -0
  42. dsipts/models/d3vae/encoder.py +326 -0
  43. dsipts/models/d3vae/model.py +211 -0
  44. dsipts/models/d3vae/neural_operations.py +314 -0
  45. dsipts/models/d3vae/resnet.py +153 -0
  46. dsipts/models/d3vae/utils.py +630 -0
  47. dsipts/models/duet/__init__.py +0 -0
  48. dsipts/models/duet/layers.py +438 -0
  49. dsipts/models/duet/masked.py +202 -0
  50. dsipts/models/informer/__init__.py +0 -0
  51. dsipts/models/informer/attn.py +185 -0
  52. dsipts/models/informer/decoder.py +50 -0
  53. dsipts/models/informer/embed.py +125 -0
  54. dsipts/models/informer/encoder.py +100 -0
  55. dsipts/models/itransformer/Embed.py +142 -0
  56. dsipts/models/itransformer/SelfAttention_Family.py +355 -0
  57. dsipts/models/itransformer/Transformer_EncDec.py +134 -0
  58. dsipts/models/itransformer/__init__.py +0 -0
  59. dsipts/models/patchtst/__init__.py +0 -0
  60. dsipts/models/patchtst/layers.py +569 -0
  61. dsipts/models/samformer/__init__.py +0 -0
  62. dsipts/models/samformer/utils.py +154 -0
  63. dsipts/models/tft/__init__.py +0 -0
  64. dsipts/models/tft/sub_nn.py +234 -0
  65. dsipts/models/timexer/Layers.py +127 -0
  66. dsipts/models/timexer/__init__.py +0 -0
  67. dsipts/models/ttm/__init__.py +0 -0
  68. dsipts/models/ttm/configuration_tinytimemixer.py +307 -0
  69. dsipts/models/ttm/consts.py +16 -0
  70. dsipts/models/ttm/modeling_tinytimemixer.py +2099 -0
  71. dsipts/models/ttm/utils.py +438 -0
  72. dsipts/models/utils.py +624 -0
  73. dsipts/models/vva/__init__.py +0 -0
  74. dsipts/models/vva/minigpt.py +83 -0
  75. dsipts/models/vva/vqvae.py +459 -0
  76. dsipts/models/xlstm/__init__.py +0 -0
  77. dsipts/models/xlstm/xLSTM.py +255 -0
  78. dsipts-1.1.5.dist-info/METADATA +31 -0
  79. dsipts-1.1.5.dist-info/RECORD +81 -0
  80. dsipts-1.1.5.dist-info/WHEEL +5 -0
  81. dsipts-1.1.5.dist-info/top_level.txt +1 -0

dsipts/models/xlstm/xLSTM.py
@@ -0,0 +1,255 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+
+ class mLSTM(nn.Module):
+     def __init__(self, input_size, hidden_size, num_layers, dropout=0.0):
+         super(mLSTM, self).__init__()
+         self.input_size = input_size
+         self.hidden_size = hidden_size
+         self.num_layers = num_layers
+         self.dropout = dropout
+
+         self.lstms = nn.ModuleList([nn.LSTMCell(input_size, hidden_size) for _ in range(num_layers)])
+         self.dropout_layers = nn.ModuleList([nn.Dropout(dropout) for _ in range(num_layers - 1)])
+
+         self.W_q = nn.Linear(input_size, hidden_size)
+         self.W_k = nn.Linear(input_size, hidden_size)
+         self.W_v = nn.Linear(input_size, hidden_size)
+
+         self.exp_input_gates = nn.ModuleList([nn.Linear(input_size, hidden_size) for _ in range(num_layers)])
+         self.exp_forget_gates = nn.ModuleList([nn.Linear(input_size, hidden_size) for _ in range(num_layers)])
+         self.output_gates = nn.ModuleList([nn.Linear(hidden_size, hidden_size) for _ in range(num_layers)])
+
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         for lstm in self.lstms:
+             nn.init.xavier_uniform_(lstm.weight_ih)
+             nn.init.xavier_uniform_(lstm.weight_hh)
+             nn.init.zeros_(lstm.bias_ih)
+             nn.init.zeros_(lstm.bias_hh)
+
+         nn.init.xavier_uniform_(self.W_q.weight)
+         nn.init.xavier_uniform_(self.W_k.weight)
+         nn.init.xavier_uniform_(self.W_v.weight)
+         nn.init.zeros_(self.W_q.bias)
+         nn.init.zeros_(self.W_k.bias)
+         nn.init.zeros_(self.W_v.bias)
+
+         for gate in self.exp_input_gates + self.exp_forget_gates + self.output_gates:
+             nn.init.xavier_uniform_(gate.weight)
+             nn.init.zeros_(gate.bias)
+
+     def forward(self, input_seq, hidden_state=None):
+         batch_size = input_seq.size(0)
+         seq_length = input_seq.size(1)
+
+         if hidden_state is None:
+             hidden_state = self.init_hidden(batch_size)
+
+         output_seq = []
+         for t in range(seq_length):
+             x = input_seq[:, t, :]
+             queries = self.W_q(x)
+             keys = self.W_k(x)
+             values = self.W_v(x)
+
+             new_hidden_state = []
+             for i, (lstm, dropout, i_gate, f_gate, o_gate) in enumerate(zip(self.lstms, self.dropout_layers, self.exp_input_gates, self.exp_forget_gates, self.output_gates)):
+                 if hidden_state[i][0] is None:
+                     h, C = lstm(x)
+                 else:
+                     h, C = hidden_state[i]
+
+                 ii = torch.exp(i_gate(x))
+                 f = torch.exp(f_gate(x))
+
+                 C_t = f * C + ii * torch.matmul(values.unsqueeze(2), keys.unsqueeze(1)).squeeze(1)
+                 attn_output = torch.matmul(queries, C_t).squeeze(2)
+
+                 o = torch.sigmoid(o_gate(h))
+                 h = o * attn_output
+                 new_hidden_state.append((h, C_t))
+
+                 if i < self.num_layers - 1:
+                     x = dropout(h)
+                 else:
+                     x = h
+             hidden_state = new_hidden_state
+             output_seq.append(x)
+
+         output_seq = torch.stack(output_seq, dim=1)
+         return output_seq, hidden_state
+
+     def init_hidden(self, batch_size):
+         hidden_state = []
+         for lstm in self.lstms:
+             h = torch.zeros(batch_size, self.hidden_size, device=lstm.weight_ih.device)
+             C = torch.zeros(batch_size, self.hidden_size, self.hidden_size, device=lstm.weight_ih.device)
+             hidden_state.append((h, C))
+         return hidden_state
+ class sLSTM(nn.Module):
+     def __init__(self, input_size, hidden_size, num_layers, dropout=0.0):
+         super(sLSTM, self).__init__()
+         self.input_size = input_size
+         self.hidden_size = hidden_size
+         self.num_layers = num_layers
+         self.dropout = dropout
+
+         self.lstms = nn.ModuleList([nn.LSTMCell(input_size if i == 0 else hidden_size, hidden_size) for i in range(num_layers)])
+         self.dropout_layers = nn.ModuleList([nn.Dropout(dropout) for _ in range(num_layers - 1)])
+
+         self.exp_forget_gates = nn.ModuleList([nn.Linear(hidden_size, hidden_size) for _ in range(num_layers)])
+         self.exp_input_gates = nn.ModuleList([nn.Linear(hidden_size, hidden_size) for _ in range(num_layers)])
+
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         for lstm in self.lstms:
+             nn.init.xavier_uniform_(lstm.weight_ih)
+             nn.init.xavier_uniform_(lstm.weight_hh)
+             nn.init.zeros_(lstm.bias_ih)
+             nn.init.zeros_(lstm.bias_hh)
+
+         for gate in self.exp_forget_gates + self.exp_input_gates:
+             nn.init.xavier_uniform_(gate.weight)
+             nn.init.zeros_(gate.bias)
+
+     def forward(self, input_seq, hidden_state=None):
+         batch_size = input_seq.size(0)
+         seq_length = input_seq.size(1)
+
+         if hidden_state is None:
+             hidden_state = self.init_hidden(batch_size)
+
+         output_seq = []
+         for t in range(seq_length):
+             x = input_seq[:, t, :]
+             new_hidden_state = []
+             for i, (lstm, dropout, f_gate, i_gate) in enumerate(zip(self.lstms, self.dropout_layers, self.exp_forget_gates, self.exp_input_gates)):
+                 if hidden_state[i][0] is None:
+                     h, c = lstm(x)
+                 else:
+                     h, c = lstm(x, (hidden_state[i][0], hidden_state[i][1]))
+
+                 f = torch.exp(f_gate(h))
+                 ii = torch.exp(i_gate(h))
+                 c = f * c + ii * lstm.weight_hh.new_zeros(batch_size, self.hidden_size)
+                 new_hidden_state.append((h, c))
+
+                 if i < self.num_layers - 1:
+                     x = dropout(h)
+                 else:
+                     x = h
+             hidden_state = new_hidden_state
+             output_seq.append(x)
+
+         output_seq = torch.stack(output_seq, dim=1)
+         return output_seq, hidden_state
+
+     def init_hidden(self, batch_size):
+         hidden_state = []
+         for lstm in self.lstms:
+             h = torch.zeros(batch_size, self.hidden_size, device=lstm.weight_ih.device)
+             c = torch.zeros(batch_size, self.hidden_size, device=lstm.weight_ih.device)
+             hidden_state.append((h, c))
+         return hidden_state
+
+ class xLSTMBlock(nn.Module):
+     def __init__(self, input_size, hidden_size, num_layers, dropout=0.0, bidirectional=False, lstm_type="slstm"):
+         super(xLSTMBlock, self).__init__()
+         self.input_size = input_size
+         self.hidden_size = hidden_size
+         self.num_layers = num_layers
+         self.dropout = dropout
+         self.bidirectional = bidirectional
+         self.lstm_type = lstm_type
+
+         if lstm_type == "slstm":
+             self.lstm = sLSTM(input_size, hidden_size, num_layers, dropout)
+         elif lstm_type == "mlstm":
+             self.lstm = mLSTM(input_size, hidden_size, num_layers, dropout)
+         else:
+             raise ValueError(f"Invalid LSTM type: {lstm_type}")
+
+         self.norm = nn.LayerNorm(input_size)
+         self.activation = nn.GELU()
+         self.dropout_layer = nn.Dropout(dropout)
+
+         if bidirectional:
+             self.proj = nn.Linear(2 * hidden_size, input_size)
+         else:
+             if lstm_type == "mlstm":
+                 self.up_proj = nn.Sequential(
+                     nn.Linear(input_size, 4 * input_size),
+                     nn.GELU(),
+                     nn.Linear(4 * input_size, input_size)
+                 )
+             self.proj = nn.Linear(hidden_size, input_size)
+
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         if hasattr(self, "up_proj"):
+             nn.init.xavier_uniform_(self.up_proj[0].weight)
+             nn.init.zeros_(self.up_proj[0].bias)
+             nn.init.xavier_uniform_(self.up_proj[2].weight)
+             nn.init.zeros_(self.up_proj[2].bias)
+
+         nn.init.xavier_uniform_(self.proj.weight)
+         nn.init.zeros_(self.proj.bias)
+
+     def forward(self, input_seq, hidden_state=None):
+         if hasattr(self, "up_proj"):
+             input_seq = self.up_proj(input_seq)
+
+         lstm_output, hidden_state = self.lstm(input_seq, hidden_state)
+         if self.lstm_type == "slstm":
+             hidden_state = [[hidden_state[i][0], hidden_state[i][1]] for i in range(len(hidden_state))]
+
+         if self.bidirectional:
+             lstm_output = torch.cat((lstm_output[:, :, :self.hidden_size], lstm_output[:, :, -self.hidden_size:]), dim=-1)
+
+         output = self.activation(self.proj(lstm_output))
+         output = self.norm(output + input_seq)
+         output = self.dropout_layer(output)
+
+         return output, hidden_state
+
+
+
+ class xLSTM(nn.Module):
+     def __init__(self, input_size, hidden_size, num_layers, num_blocks,
+                  dropout=0.0, bidirectional=False, lstm_type="slstm"):
+         super(xLSTM, self).__init__()
+         self.input_size = input_size
+         self.hidden_size = hidden_size
+         self.num_layers = num_layers
+         self.num_blocks = num_blocks
+         self.dropout = dropout
+         self.bidirectional = bidirectional
+         self.lstm_type = lstm_type
+
+         self.blocks = nn.ModuleList([xLSTMBlock(hidden_size,
+                                                 hidden_size, num_layers, dropout, bidirectional, lstm_type)
+                                      for i in range(num_blocks)])
+
+         self.initial = nn.Linear(self.input_size, self.hidden_size)
+
+     def forward(self, input_seq, hidden_states=None):
+
+         if hidden_states is None:
+             hidden_states = [None] * self.num_blocks
+         output_seq = self.initial(input_seq)
+         for i, block in enumerate(self.blocks):
+
+             output_seq, hidden_state = block(output_seq, hidden_states[i])
+             if self.lstm_type == "slstm":
+                 hidden_states[i] = [[hidden_state[j][0], hidden_state[j][1]] for j in range(len(hidden_state))]
+             else:
+                 hidden_states[i] = hidden_state
+
+         return output_seq, hidden_states
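
For orientation, the block below is a minimal smoke-test sketch of the xLSTM module added above, assuming the wheel is installed. The import path follows the file layout in the RECORD; the tensor sizes and hyperparameters are illustrative assumptions, not values taken from the package, and the sketch exercises only the default slstm path.

    import torch
    from dsipts.models.xlstm.xLSTM import xLSTM

    # Build a small model; lstm_type="slstm" is the constructor default.
    model = xLSTM(input_size=8, hidden_size=32, num_layers=2,
                  num_blocks=3, dropout=0.1, lstm_type="slstm")

    # One batch of 4 series, 24 time steps, 8 features each.
    batch = torch.randn(4, 24, 8)

    # self.initial projects features to hidden_size, so the output
    # keeps that width: (batch, time, hidden_size).
    output, hidden_states = model(batch)
    print(output.shape)  # torch.Size([4, 24, 32])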

dsipts-1.1.5.dist-info/METADATA
@@ -0,0 +1,31 @@
+ Metadata-Version: 2.4
+ Name: dsipts
+ Version: 1.1.5
+ Summary: Unified library for timeseries modelling
+ Author-email: Andrea Gobbi <agobbi@fbk.eu>
+ Project-URL: Homepage, https://github.com/DSIP-FBK/DSIPTS
+ Project-URL: Docs, https://dsip-fbk.github.io/DSIPTS/
+ Requires-Python: ==3.11.13
+ Description-Content-Type: text/markdown
+ Requires-Dist: aim==3.20.1
+ Requires-Dist: beautifulsoup4==4.12.0
+ Requires-Dist: einops>=0.8.1
+ Requires-Dist: html-table-parser-python3==0.3.1
+ Requires-Dist: html5lib>=1.1
+ Requires-Dist: hydra-core>=1.3.2
+ Requires-Dist: hydra-joblib-launcher>=1.2.0
+ Requires-Dist: hydra-optuna-sweeper>=1.2.0
+ Requires-Dist: hydra-submitit-launcher>=1.2.0
+ Requires-Dist: ipykernel>=6.30.1
+ Requires-Dist: lightning>=2.5.4
+ Requires-Dist: matplotlib>=3.10.6
+ Requires-Dist: nbformat>=5.10.4
+ Requires-Dist: numba>=0.61.2
+ Requires-Dist: numpy<2.0.0
+ Requires-Dist: pandas>=2.3.2
+ Requires-Dist: plotly>=6.3.0
+ Requires-Dist: scikit-learn>=1.7.1
+ Requires-Dist: sphinx>=8.2.3
+ Requires-Dist: sphinx-mdinclude>=0.6.2
+ Requires-Dist: sphinx-pdj-theme>=0.7.3
+ Requires-Dist: transformers>=4.56.0
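
Note that the Requires-Python pin is exact (==3.11.13), which is unusually strict for a published wheel. As a hedged sketch, the declared constraints of an installed copy can be checked with the standard library alone:

    from importlib.metadata import metadata, requires

    # Assumes dsipts 1.1.5 is installed in the current environment.
    md = metadata("dsipts")
    print(md["Requires-Python"])    # ==3.11.13
    for req in requires("dsipts"):  # aim==3.20.1, beautifulsoup4==4.12.0, ...
        print(req)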

dsipts-1.1.5.dist-info/RECORD
@@ -0,0 +1,81 @@
+ dsipts/__init__.py,sha256=k3fLLS66oWHI_TOLccv-foIlNZNO_NMTABw14OntaAc,1651
+ dsipts/data_management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/data_management/monash.py,sha256=aZxq9FbIH6IsU8Lwou1hAokXjgOAK-wdl2VAeFg2k4M,13075
+ dsipts/data_management/public_datasets.py,sha256=yXFzOZZ-X0ZG1DoqVU-zFmEGVMc2033YDQhRgYxY8ws,6793
+ dsipts/data_structure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/data_structure/data_structure.py,sha256=5yHi6N0zXYzzKooy-91-5C1PEdNuU6ZWD-mApJvDOe8,58888
+ dsipts/data_structure/modifiers.py,sha256=qlry9dfw8pEE0GrvgwROZJkJ6oPpUnjEHPIG5qIetss,7948
+ dsipts/data_structure/utils.py,sha256=QwfKPZgSy6DIw5n6ztOdPJIAnzo4EnlMTgRbpiWnyko,6593
+ dsipts/models/Autoformer.py,sha256=ddGT3L9T4gAXNJHx1TsuYZy7j63Anyr0rkqqXaOoSu4,8447
+ dsipts/models/CrossFormer.py,sha256=iO64L3S01jxuWA9dmm8FsK1WRvBIXbZ0PQ2tZlEQg4w,6481
+ dsipts/models/D3VAE.py,sha256=NstHIniNteBRrkfL7SJ3-bJEl3l3IIxoSxavRV3j16U,6857
+ dsipts/models/Diffusion.py,sha256=pUujnrdeSSkj4jC1RORbcptt03KpuCsGVwg414o4LPg,40733
+ dsipts/models/DilatedConv.py,sha256=2gK69p4Jn9nEI2T2PebNOr70wpyR2QWxzmNQIXRAmJE,14845
+ dsipts/models/DilatedConvED.py,sha256=fXk1-EWiRC5J_VIepTjYKya_D02SlEAkyiJcCjhW_XU,14004
+ dsipts/models/Duet.py,sha256=EharWHT_r7tEYIk7BkozVLPZ0xptE5mmQmeFGm3uBsA,7628
+ dsipts/models/ITransformer.py,sha256=jO8wxLaC06Wgu4GncrFFTISv3pVyfFLLhQvbEOYsz6Y,7368
+ dsipts/models/Informer.py,sha256=ByJ00qGk12ONFF7NZWAACzxxRb5UXcu5wpkGMYX9Cq4,6920
+ dsipts/models/LinearTS.py,sha256=B0-Sz4POwUyl-PN2ssSx8L-ZHgwrQQPcMmreyvSS47U,9104
+ dsipts/models/PatchTST.py,sha256=Z7DM1Kw5Ym8Hh9ywj0j9RuFtKaz_yVZmKFIYafjceM8,9061
+ dsipts/models/Persistent.py,sha256=URwyaBb0M7zbPXSGMImtHlwC9XCy-OquFCwfWvn3P70,1249
+ dsipts/models/RNN.py,sha256=W6-3ZPD6vkcNoxV9liqcm_8RD8qfF1JY-J7M1ngk6LA,9594
+ dsipts/models/Samformer.py,sha256=s61Hi1o9iuw-KgSBPfiE80oJcK1j2fUA6N9f5BJgKJc,5551
+ dsipts/models/TFT.py,sha256=JO2-AKIUag7bfm9Oeo4KmGfdYZJbzQBHPDqGVg0WUZI,13830
+ dsipts/models/TIDE.py,sha256=i8qXac2gImEVgE2X6cNxqW5kuQP3rzWMlQNdgJbNmKM,13033
+ dsipts/models/TTM.py,sha256=WpCiTN0qX3JFO6xgPLedoqMKXUC2pQpNAe9ee-Rw89Q,10602
+ dsipts/models/TimeXER.py,sha256=aCg0003LxYZzqZWyWugpbW_iOybcdHN4OH6_v77qp4o,7056
+ dsipts/models/VQVAEA.py,sha256=sNJi8UZh-10qEIKcZK3SzhlOFUUjvqjoglzeZBFaeZM,13789
+ dsipts/models/VVA.py,sha256=BnPkJ0Nzue0oShSHZVRNlf5RvT0Iwtf9bx19vLB9Nn0,11939
+ dsipts/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/base.py,sha256=uqAjDdAjpnDWBlEeGuy-SZir0K8GjsoyMgxwlT1DQYg,17523
+ dsipts/models/base_v2.py,sha256=YHrejmxTmICXvLaXiual8j4dztSnWoQevIZWwy9zmfQ,17695
+ dsipts/models/utils.py,sha256=H1lr1lukDk7FNyXXTJh217tyTBsBW8hVDQ6jL9oev7I,21765
+ dsipts/models/autoformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/autoformer/layers.py,sha256=xHt8V1lKdD1cIvgxXdDbI_EqOz4zgOQ6LP8l7M1pAxM,13276
+ dsipts/models/crossformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/crossformer/attn.py,sha256=nUPdhU0wul222We8lvtr7SGeMy2VA_86vaMhnzH1t9g,4580
+ dsipts/models/crossformer/cross_decoder.py,sha256=mTo7urCnXrEgynYgyQSjRdE1T8s5fSE1daUaFyATeHU,3032
+ dsipts/models/crossformer/cross_embed.py,sha256=SkOHN5KwRObNYzu858Q40EMSgqCkBzNq2SYwoJ0SdcQ,609
+ dsipts/models/crossformer/cross_encoder.py,sha256=_ivTGddvvN82Q1E_TRPnLDwC2_s3xOIQXfUQ5lVbOZU,3238
+ dsipts/models/d3vae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/d3vae/diffusion_process.py,sha256=qbe-8da4TnPb-152W6JVyUKqllVgfOda43Yr_e3XebE,6050
+ dsipts/models/d3vae/embedding.py,sha256=3S6QnOtjcJJYIFm0UcNhO12HSj6MisCTYm3ky3zuhmQ,3797
+ dsipts/models/d3vae/encoder.py,sha256=c6cq9xKzT_xGcCf6hZy1UE2mxCPtuKwUsQXr1JJKfnE,13114
+ dsipts/models/d3vae/model.py,sha256=s6tqGdZ9dQdPXRLsNxHIbPpSs07t-tJttjTHCeibL48,9238
+ dsipts/models/d3vae/neural_operations.py,sha256=C70kUtQ0ox9MeXBdu4rPDqt022_hVtcN_MNOD8y1cqI,10594
+ dsipts/models/d3vae/resnet.py,sha256=3bnlrEBM2DGiAJV8TeSv2tm27Gm-_P6hee41t8QQFL8,5520
+ dsipts/models/d3vae/utils.py,sha256=fmUsE_67uwizjeR1_pDdsndyQddbqt27Lv31XBEn-gw,23798
+ dsipts/models/duet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/duet/layers.py,sha256=ikMAKr4DzhGt9J1KuBBQzVYXMHZUZVsV29P6TVJCa_Y,18141
+ dsipts/models/duet/masked.py,sha256=lkdAB5kwAgV7QfBSVP_QeDr_mB09Rz4302p-KwZpUV4,7111
+ dsipts/models/informer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/informer/attn.py,sha256=ghrQGfAqt-Z_7qU5D_aixobmwk6pBKMLAdaNfg-QZbo,6839
+ dsipts/models/informer/decoder.py,sha256=gvMfwI6Lyi0JIZlXqx6TVDYEUwRHFDYekzfbEb0A2ck,1759
+ dsipts/models/informer/embed.py,sha256=hYBIhZvaF9RUuMxF8_-ybpnOvQ2mgqzn3zPFv8Okx6k,4449
+ dsipts/models/informer/encoder.py,sha256=H9O_2hOJDcISj_CFbFcZR-tU3n-NtQFnlpNy3G5p8tY,3573
+ dsipts/models/itransformer/Embed.py,sha256=BM6v2BsRuLNGPEN4L2HSyC1VHXjWTx63L0WH8_FJpsA,5122
+ dsipts/models/itransformer/SelfAttention_Family.py,sha256=739Jvoo1vJ6TR53zG6msc71DdkC0dUgVZ0ZdP58Dd_s,13674
+ dsipts/models/itransformer/Transformer_EncDec.py,sha256=kkHdKKJ48gFaaW2MHy0YFd5xvaM3YoN9Jk4wsKO8RP4,4898
+ dsipts/models/itransformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/patchtst/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/patchtst/layers.py,sha256=o8s--1NQcE9ItlkHse_NUxix0xfxJ9yNSmy98PMVvyc,24177
+ dsipts/models/samformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/samformer/utils.py,sha256=62p5fzippKwZpqZBQghrHyA_ANeaFa-TC5EM4L6Q7DE,5583
+ dsipts/models/tft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/tft/sub_nn.py,sha256=6UDI0BvxEcF5N1_Wx3-WL0PO99k8QtI7JTsEAaosb5k,8881
+ dsipts/models/timexer/Layers.py,sha256=-QG4a70sBQFoRoE6CfisOkKhm4kIdB21DBtQ4V-7fHw,4699
+ dsipts/models/timexer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/ttm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/ttm/configuration_tinytimemixer.py,sha256=b_cvewRZQuPSuJx0GEDFIY85hThf7612ynukDciYlZE,16414
+ dsipts/models/ttm/consts.py,sha256=tPL5yxHR9fQaSMaOREcj-7ML8lZ_FX5ZPiLwkBo2814,340
+ dsipts/models/ttm/modeling_tinytimemixer.py,sha256=55G12e_Vei2aK9g-C5PDwwri-ujXbkQ5cpH4nUkbiiU,87006
+ dsipts/models/ttm/utils.py,sha256=dpBUg1Nci_9-MBnWMHiZUW7WWAQ4oRn_it3X5FdCOhE,26379
+ dsipts/models/vva/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/vva/minigpt.py,sha256=bg0JddqSD322uxSGexen3nPXL_hGTsk3vNLR62d7-w8,3551
+ dsipts/models/vva/vqvae.py,sha256=RzCQ_M9xBprp7_x20dSV3EQqlO0FjPUGWV-qdyKrQsM,19680
+ dsipts/models/xlstm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ dsipts/models/xlstm/xLSTM.py,sha256=ZKZZmffmIq1Vb71CR4GSyM8viqVx-u0FChxhcNgHub8,10081
+ dsipts-1.1.5.dist-info/METADATA,sha256=qR-kBKMsp_LN4nGOzRE6piEsaiDaD5WUB8-ASMRWbAQ,1082
+ dsipts-1.1.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dsipts-1.1.5.dist-info/top_level.txt,sha256=i6o0rf5ScFwZK21E89dSKjVNjUBkrEQpn0-Vij43748,7
+ dsipts-1.1.5.dist-info/RECORD,,

dsipts-1.1.5.dist-info/WHEEL
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+

dsipts-1.1.5.dist-info/top_level.txt
@@ -0,0 +1 @@
+ dsipts