dsipts 1.1.6__tar.gz → 1.1.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dsipts might be problematic.

Files changed (86)
  1. {dsipts-1.1.6 → dsipts-1.1.7}/PKG-INFO +7 -3
  2. {dsipts-1.1.6 → dsipts-1.1.7}/README.md +6 -2
  3. {dsipts-1.1.6 → dsipts-1.1.7}/pyproject.toml +1 -1
  4. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/data_structure/data_structure.py +2 -0
  5. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/DilatedConv.py +3 -20
  6. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/base.py +33 -11
  7. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/base_v2.py +37 -11
  8. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts.egg-info/PKG-INFO +7 -3
  9. {dsipts-1.1.6 → dsipts-1.1.7}/setup.cfg +0 -0
  10. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/__init__.py +0 -0
  11. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/data_management/__init__.py +0 -0
  12. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/data_management/monash.py +0 -0
  13. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/data_management/public_datasets.py +0 -0
  14. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/data_structure/__init__.py +0 -0
  15. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/data_structure/modifiers.py +0 -0
  16. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/data_structure/utils.py +0 -0
  17. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/Autoformer.py +0 -0
  18. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/CrossFormer.py +0 -0
  19. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/D3VAE.py +0 -0
  20. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/Diffusion.py +0 -0
  21. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/DilatedConvED.py +0 -0
  22. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/Duet.py +0 -0
  23. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/ITransformer.py +0 -0
  24. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/Informer.py +0 -0
  25. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/LinearTS.py +0 -0
  26. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/PatchTST.py +0 -0
  27. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/Persistent.py +0 -0
  28. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/RNN.py +0 -0
  29. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/Samformer.py +0 -0
  30. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/TFT.py +0 -0
  31. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/TIDE.py +0 -0
  32. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/TTM.py +0 -0
  33. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/TimeXER.py +0 -0
  34. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/VQVAEA.py +0 -0
  35. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/VVA.py +0 -0
  36. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/__init__.py +0 -0
  37. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/autoformer/__init__.py +0 -0
  38. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/autoformer/layers.py +0 -0
  39. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/crossformer/__init__.py +0 -0
  40. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/crossformer/attn.py +0 -0
  41. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/crossformer/cross_decoder.py +0 -0
  42. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/crossformer/cross_embed.py +0 -0
  43. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/crossformer/cross_encoder.py +0 -0
  44. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/d3vae/__init__.py +0 -0
  45. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/d3vae/diffusion_process.py +0 -0
  46. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/d3vae/embedding.py +0 -0
  47. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/d3vae/encoder.py +0 -0
  48. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/d3vae/model.py +0 -0
  49. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/d3vae/neural_operations.py +0 -0
  50. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/d3vae/resnet.py +0 -0
  51. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/d3vae/utils.py +0 -0
  52. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/duet/__init__.py +0 -0
  53. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/duet/layers.py +0 -0
  54. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/duet/masked.py +0 -0
  55. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/informer/__init__.py +0 -0
  56. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/informer/attn.py +0 -0
  57. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/informer/decoder.py +0 -0
  58. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/informer/embed.py +0 -0
  59. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/informer/encoder.py +0 -0
  60. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/itransformer/Embed.py +0 -0
  61. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/itransformer/SelfAttention_Family.py +0 -0
  62. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/itransformer/Transformer_EncDec.py +0 -0
  63. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/itransformer/__init__.py +0 -0
  64. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/patchtst/__init__.py +0 -0
  65. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/patchtst/layers.py +0 -0
  66. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/samformer/__init__.py +0 -0
  67. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/samformer/utils.py +0 -0
  68. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/tft/__init__.py +0 -0
  69. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/tft/sub_nn.py +0 -0
  70. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/timexer/Layers.py +0 -0
  71. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/timexer/__init__.py +0 -0
  72. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/ttm/__init__.py +0 -0
  73. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/ttm/configuration_tinytimemixer.py +0 -0
  74. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/ttm/consts.py +0 -0
  75. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/ttm/modeling_tinytimemixer.py +0 -0
  76. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/ttm/utils.py +0 -0
  77. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/utils.py +0 -0
  78. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/vva/__init__.py +0 -0
  79. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/vva/minigpt.py +0 -0
  80. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/vva/vqvae.py +0 -0
  81. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/xlstm/__init__.py +0 -0
  82. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/xlstm/xLSTM.py +0 -0
  83. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts.egg-info/SOURCES.txt +0 -0
  84. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts.egg-info/dependency_links.txt +0 -0
  85. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts.egg-info/requires.txt +0 -0
  86. {dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts.egg-info/top_level.txt +0 -0
{dsipts-1.1.6 → dsipts-1.1.7}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dsipts
-Version: 1.1.6
+Version: 1.1.7
 Summary: Unified library for timeseries modelling
 Author-email: Andrea Gobbi <agobbi@fbk.eu>
 Project-URL: Homepage, https://github.com/DSIP-FBK/DSIPTS
@@ -128,10 +128,14 @@ or attention based models:
 ## Install
 Clone the repo (gitlab or github)
 The library is structured to work with [uv](https://github.com/astral-sh/uv). After installing `uv` just run
-```
+```bash
 uv pip install .
 ```
-The pip package wi
+You can install also the package from pip (be sure that the python version is less than 3.12, still sperimental):
+```bash
+uv venv --python 3.11
+uv pip install dsipts
+```
 
 
 ## For developers

{dsipts-1.1.6 → dsipts-1.1.7}/README.md
@@ -96,10 +96,14 @@ or attention based models:
 ## Install
 Clone the repo (gitlab or github)
 The library is structured to work with [uv](https://github.com/astral-sh/uv). After installing `uv` just run
-```
+```bash
 uv pip install .
 ```
-The pip package wi
+You can install also the package from pip (be sure that the python version is less than 3.12, still sperimental):
+```bash
+uv venv --python 3.11
+uv pip install dsipts
+```
 
 
 ## For developers

{dsipts-1.1.6 → dsipts-1.1.7}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "dsipts"
-version = "1.1.6"
+version = "1.1.7"
 description = "Unified library for timeseries modelling"
 readme = "README.md"
 requires-python = "==3.11.13"

{dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/data_structure/data_structure.py
@@ -800,6 +800,7 @@ class TimeSeries():
                 callbacks=[checkpoint_callback,mc],
                 auto_lr_find=auto_lr_find,
                 accelerator=accelerator,
+                log_every_n_steps=5,
                 devices=devices,
                 strategy=strategy,
                 enable_progress_bar=False,
@@ -813,6 +814,7 @@ class TimeSeries():
                 callbacks=[checkpoint_callback,mc],
                 strategy='auto',
                 devices=devices,
+                log_every_n_steps=5,
                 enable_progress_bar=False,
                 precision=precision,
                 gradient_clip_val=gradient_clip_val,

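The two trainer configurations in the `TimeSeries` class now pass `log_every_n_steps=5`, so logged metrics are emitted every 5 training batches rather than PyTorch Lightning's default of 50. A minimal sketch of where the option lands (the surrounding argument values are placeholders, not the library's exact call):

```python
# Minimal sketch, assuming PyTorch Lightning: log_every_n_steps controls how
# often values passed to self.log(...) are written out during training.
import pytorch_lightning as pl

trainer = pl.Trainer(
    max_epochs=10,             # placeholder value
    accelerator="auto",
    devices=1,
    log_every_n_steps=5,       # added in 1.1.7: flush logged metrics every 5 batches
    enable_progress_bar=False,
)
```

One practical effect is on short runs, where an epoch may contain fewer batches than the default logging interval.
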
{dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/DilatedConv.py
@@ -231,28 +231,11 @@ class DilatedConv(Base):
                                  activation(),
                                  nn.Linear(hidden_RNN//4,1)))
 
+        self.return_additional_loss = True
 
 
 
 
-    def training_step(self, batch, batch_idx):
-        """
-        pythotrch lightening stuff
-
-        :meta private:
-        """
-        y_hat,score = self(batch)
-        return self.compute_loss(batch,y_hat)#+torch.abs(score-self.glu_percentage)*loss/5.0 ##TODO investigating
-
-    def validation_step(self, batch, batch_idx):
-        """
-        pythotrch lightening stuff
-
-        :meta private:
-        """
-        y_hat,score = self(batch)
-        return self.compute_loss(batch,y_hat)#+torch.abs(score-self.glu_percentage)*loss/5.0 ##TODO investigating
-
     def forward(self, batch):
         """It is mandatory to implement this method
 
@@ -332,11 +315,11 @@
         res = res.reshape(B,self.future_steps,-1,self.mul)
         if self.remove_last:
             res+=x_start.unsqueeze(1)
-
+
 
         return res, score
 
     def inference(self, batch:dict)->torch.tensor:
-
+
         res, score = self(batch)
         return res

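Together with the `base.py`/`base_v2.py` changes below, the effect of this change is that a model whose `forward` returns a `(prediction, auxiliary_loss)` pair no longer needs its own `training_step`/`validation_step`: it sets `self.return_additional_loss = True` and the shared Base steps add the auxiliary term to the loss. A sketch of the contract with a toy model (everything here except `return_additional_loss` and the tuple return is hypothetical):

```python
# Illustrative sketch of the new opt-in contract, not dsipts code.
import torch

class ToyAuxModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.return_additional_loss = True            # opt in, as DilatedConv now does
        self.head = torch.nn.Linear(8, 1)

    def forward(self, batch):
        y_hat = self.head(batch["x"])                 # main prediction
        score = 0.01 * self.head.weight.abs().mean()  # auxiliary penalty term
        return y_hat, score

def shared_step(model, batch, compute_loss):
    # Mirrors the branching added to the shared Base training/validation steps.
    if model.return_additional_loss:
        y_hat, score = model(batch)
        return compute_loss(batch, y_hat) + score
    y_hat = model(batch)
    return compute_loss(batch, y_hat)

batch = {"x": torch.randn(4, 8), "y": torch.randn(4, 1)}
mse = lambda b, y_hat: torch.nn.functional.mse_loss(y_hat, b["y"])
print(shared_step(ToyAuxModel(), batch, mse))
```
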
{dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/base.py
@@ -154,7 +154,7 @@ class Base(pl.LightningModule):
            assert self.out_channels==1, "Classification require only one channel"
 
        self.future_steps = future_steps
-
+       self.return_additional_loss = False
        beauty_string(self.description,'info',True)
    @abstractmethod
    def forward(self, batch:dict)-> torch.tensor:
@@ -247,14 +247,22 @@ class Base(pl.LightningModule):
            opt = self.optimizers()
            def closure():
                opt.zero_grad()
-               y_hat = self(batch)
-               loss = self.compute_loss(batch,y_hat)
+               if self.return_additional_loss:
+                   y_hat,score = self(batch)
+                   loss = self.compute_loss(batch,y_hat) + score
+               else:
+                   y_hat = self(batch)
+                   loss = self.compute_loss(batch,y_hat)
                self.manual_backward(loss)
                return loss
 
            opt.step(closure)
-           y_hat = self(batch)
-           loss = self.compute_loss(batch,y_hat)
+           if self.return_additional_loss:
+               y_hat,score = self(batch)
+               loss = self.compute_loss(batch,y_hat)+score
+           else:
+               y_hat = self(batch)
+               loss = self.compute_loss(batch,y_hat)
 
            #opt.first_step(zero_grad=True)
 
@@ -269,8 +277,14 @@ class Base(pl.LightningModule):
 
            #self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.increment("optimizer")
        else:
-           y_hat = self(batch)
-           loss = self.compute_loss(batch,y_hat)
+           if self.return_additional_loss:
+               y_hat,score = self(batch)
+               loss = self.compute_loss(batch,y_hat)+score
+           else:
+               y_hat = self(batch)
+               loss = self.compute_loss(batch,y_hat)
+
+       self.train_epoch_metrics.append(loss.item())
        return loss
 
 
@@ -280,7 +294,11 @@ class Base(pl.LightningModule):
 
        :meta private:
        """
-       y_hat = self(batch)
+       if self.return_additional_loss:
+           y_hat,score = self(batch)
+       else:
+           y_hat = self(batch)
+           score = 0
       if batch_idx==0:
           if self.use_quantiles:
               idx = 1
@@ -301,7 +319,7 @@ class Base(pl.LightningModule):
               self.logger.experiment.track(Image(fig), name='cm_training_end')
           #self.log(f"example_{i}", np.stack([real, pred]).T,sync_dist=True)
 
-       return self.compute_loss(batch,y_hat)
+       return self.compute_loss(batch,y_hat)+score
 
 
    def validation_epoch_end(self, outs):
@@ -310,8 +328,12 @@ class Base(pl.LightningModule):
 
        :meta private:
        """
-
-       loss = torch.stack(outs).mean()
+       if len(outs)==0:
+           loss = 10000
+           beauty_string(f'THIS IS A BUG, It should be polulated','info',self.verbose)
+       else:
+           loss = torch.stack(outs).mean()
+
       self.log("val_loss", loss.item(),sync_dist=True)
       beauty_string(f'Epoch: {self.count_epoch} train error: {self.train_loss_epoch:.4f} validation loss: {loss.item():.4f}','info',self.verbose)
 

{dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts/models/base_v2.py
@@ -157,7 +157,7 @@ class Base(pl.LightningModule):
 
 
        self.future_steps = future_steps
-
+       self.return_additional_loss = False
        beauty_string(self.description,'info',True)
    @abstractmethod
    def forward(self, batch:dict)-> torch.tensor:
@@ -250,14 +250,22 @@ class Base(pl.LightningModule):
            opt = self.optimizers()
            def closure():
                opt.zero_grad()
-               y_hat = self(batch)
-               loss = self.compute_loss(batch,y_hat)
+               if self.return_additional_loss:
+                   y_hat,score = self(batch)
+                   loss = self.compute_loss(batch,y_hat) + score
+               else:
+                   y_hat = self(batch)
+                   loss = self.compute_loss(batch,y_hat)
                self.manual_backward(loss)
                return loss
 
            opt.step(closure)
-           y_hat = self(batch)
-           loss = self.compute_loss(batch,y_hat)
+           if self.return_additional_loss:
+               y_hat,score = self(batch)
+               loss = self.compute_loss(batch,y_hat)+score
+           else:
+               y_hat = self(batch)
+               loss = self.compute_loss(batch,y_hat)
 
            #opt.first_step(zero_grad=True)
 
@@ -272,8 +280,12 @@ class Base(pl.LightningModule):
 
            #self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.increment("optimizer")
        else:
-           y_hat = self(batch)
-           loss = self.compute_loss(batch,y_hat)
+           if self.return_additional_loss:
+               y_hat,score = self(batch)
+               loss = self.compute_loss(batch,y_hat)+score
+           else:
+               y_hat = self(batch)
+               loss = self.compute_loss(batch,y_hat)
 
       self.train_epoch_metrics.append(loss.item())
       return loss
@@ -285,7 +297,12 @@ class Base(pl.LightningModule):
 
        :meta private:
        """
-       y_hat = self(batch)
+
+       if self.return_additional_loss:
+           y_hat,score = self(batch)
+       else:
+           y_hat = self(batch)
+           score = 0
       if batch_idx==0:
           if self.use_quantiles:
               idx = 1
@@ -305,7 +322,7 @@ class Base(pl.LightningModule):
              ax.set_title(f'Channel {i} first element first batch validation {int(100*self.count_epoch/self.trainer.max_epochs)}%')
              self.logger.experiment.track(Image(fig), name='cm_training_end')
          #self.log(f"example_{i}", np.stack([real, pred]).T,sync_dist=True)
-      self.validation_epoch_metrics.append(self.compute_loss(batch,y_hat))
+      self.validation_epoch_metrics.append(self.compute_loss(batch,y_hat)+score)
      return
 
 
@@ -315,7 +332,12 @@ class Base(pl.LightningModule):
 
        :meta private:
        """
-       avg = torch.stack(self.validation_epoch_metrics).mean()
+
+       if len(self.validation_epoch_metrics)==0:
+           avg = 10000
+           beauty_string(f'THIS IS A BUG, It should be polulated','info',self.verbose)
+       else:
+           avg = torch.stack(self.validation_epoch_metrics).mean()
       self.validation_epoch_metrics = []
       self.log("val_loss", avg,sync_dist=True)
       beauty_string(f'Epoch: {self.count_epoch} train error: {self.train_loss_epoch:.4f} validation loss: {avg:.4f}','info',self.verbose)
@@ -327,7 +349,11 @@ class Base(pl.LightningModule):
 
        :meta private:
        """
-       avg = np.stack(self.train_epoch_metrics).mean()
+       if len(self.train_epoch_metrics)==0:
+           avg = 0
+           beauty_string(f'THIS IS A BUG, It should be polulated','info',self.verbose)
+       else:
+           avg = np.stack(self.train_epoch_metrics).mean()
       self.log("train_loss", avg,sync_dist=True)
       self.count_epoch+=1
      self.train_epoch_metrics = []

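The epoch-end hooks in both base modules now check for an empty metrics list before averaging. The reason is that `torch.stack` (and `np.stack`) raise on empty input, so an epoch in which no per-batch loss was recorded would crash the hook. A tiny reproduction of the failure mode the guard avoids (the fallback value mirrors the new code path):

```python
# Why the length check exists: stacking an empty list of per-batch losses raises.
import torch

outs = []  # e.g. nothing was appended to the metrics list this epoch
try:
    loss = torch.stack(outs).mean()
except RuntimeError:
    loss = 10000  # sentinel used by the new fallback branch
print(loss)
```
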
{dsipts-1.1.6 → dsipts-1.1.7}/src/dsipts.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dsipts
-Version: 1.1.6
+Version: 1.1.7
 Summary: Unified library for timeseries modelling
 Author-email: Andrea Gobbi <agobbi@fbk.eu>
 Project-URL: Homepage, https://github.com/DSIP-FBK/DSIPTS
@@ -128,10 +128,14 @@ or attention based models:
 ## Install
 Clone the repo (gitlab or github)
 The library is structured to work with [uv](https://github.com/astral-sh/uv). After installing `uv` just run
-```
+```bash
 uv pip install .
 ```
-The pip package wi
+You can install also the package from pip (be sure that the python version is less than 3.12, still sperimental):
+```bash
+uv venv --python 3.11
+uv pip install dsipts
+```
 
 
 ## For developers
