dsipts 1.1.11-py3-none-any.whl → 1.1.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dsipts/data_structure/data_structure.py +23 -8
- dsipts/models/Autoformer.py +2 -1
- dsipts/models/CrossFormer.py +2 -1
- dsipts/models/D3VAE.py +2 -1
- dsipts/models/Diffusion.py +3 -0
- dsipts/models/DilatedConv.py +2 -1
- dsipts/models/DilatedConvED.py +2 -1
- dsipts/models/Duet.py +2 -1
- dsipts/models/ITransformer.py +3 -0
- dsipts/models/Informer.py +2 -1
- dsipts/models/LinearTS.py +2 -1
- dsipts/models/PatchTST.py +3 -0
- dsipts/models/RNN.py +2 -1
- dsipts/models/Samformer.py +3 -1
- dsipts/models/Simple.py +3 -1
- dsipts/models/TFT.py +4 -0
- dsipts/models/TIDE.py +4 -1
- dsipts/models/TTM.py +22 -9
- dsipts/models/TimeXER.py +3 -1
- dsipts/models/base_v2.py +7 -8
- dsipts/models/duet/layers.py +6 -2
- {dsipts-1.1.11.dist-info → dsipts-1.1.12.dist-info}/METADATA +1 -1
- {dsipts-1.1.11.dist-info → dsipts-1.1.12.dist-info}/RECORD +25 -25
- {dsipts-1.1.11.dist-info → dsipts-1.1.12.dist-info}/WHEEL +0 -0
- {dsipts-1.1.11.dist-info → dsipts-1.1.12.dist-info}/top_level.txt +0 -0
dsipts/data_structure/data_structure.py
CHANGED
@@ -683,10 +683,7 @@ class TimeSeries():
         #self.model.apply(weight_init_zeros)

         self.config = config
-
-        self.model = torch.compile(self.model)
-        except:
-            beauty_string('Can not compile the model','block',self.verbose)
+

         beauty_string('Setting the model','block',self.verbose)
         beauty_string(model,'',self.verbose)
@@ -812,8 +809,17 @@ class TimeSeries():
             weight_exists = False
             beauty_string('I can not load a previous model','section',self.verbose)

+        self.model.to(torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))
+        if self.model.can_be_compiled():
+            try:
+                self.model = torch.compile(self.model)
+                beauty_string('Model COMPILED','block',self.verbose)
+
+            except:
+                beauty_string('Can not compile the model','block',self.verbose)
+        else:
+            beauty_string('Model can not still be compiled, be patient','block',self.verbose)

-

         if OLD_PL:
             trainer = pl.Trainer(default_root_dir=dirpath,
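The hunk above replaces the old unconditional `torch.compile` call with an opt-in gate: the model is moved to the target device first, and compilation is attempted only when the model reports `can_be_compiled()`. A minimal sketch of the pattern, using only the names visible in the diff (the helper name `maybe_compile` is illustrative, not part of dsipts, and plain prints stand in for `beauty_string`):

    import torch

    def maybe_compile(model):
        """Compile `model` only if it declares itself compile-safe; fall back silently."""
        model.to(torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))
        if model.can_be_compiled():
            try:
                # torch.compile returns an OptimizedModule wrapping the original module
                model = torch.compile(model)
                print("Model COMPILED")
            except Exception:
                print("Can not compile the model")
        else:
            print("Model can not still be compiled, be patient")
        return model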
@@ -895,10 +901,19 @@ class TimeSeries():
         self.losses = pd.DataFrame()

         try:
+
             if OLD_PL:
-                self.model
+                if isinstance(self.model, torch._dynamo.eval_frame.OptimizedModule):
+                    self.model = self.model._orig_mod
+                    self.model.load_from_checkpoint(self.checkpoint_file_last)
+                else:
+                    self.model = self.model.load_from_checkpoint(self.checkpoint_file_last)
             else:
-                self.model
+                if isinstance(self.model, torch._dynamo.eval_frame.OptimizedModule):
+                    mm = self.model._orig_mod
+                    self.model = mm.__class__.load_from_checkpoint(self.checkpoint_file_last)
+                else:
+                    self.model = self.model.__class__.load_from_checkpoint(self.checkpoint_file_last)

         except Exception as _:
             beauty_string(f'There is a problem loading the weights on file MAYBE CHANGED HOW WEIGHTS ARE LOADED {self.checkpoint_file_last}','section',self.verbose)
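Checkpoint reloading now has to account for the wrapper that `torch.compile` puts around the module: an `OptimizedModule` keeps the original LightningModule in `_orig_mod` and does not itself expose the model class, so the diff unwraps it before calling `load_from_checkpoint`. A sketch of the unwrap step (illustrative helper, not part of dsipts; the checkpoint path is a placeholder):

    import torch

    def reload_from_checkpoint(model, ckpt_path):
        if isinstance(model, torch._dynamo.eval_frame.OptimizedModule):
            model = model._orig_mod              # recover the original LightningModule
        # the plain class method is available again on the unwrapped model
        return model.__class__.load_from_checkpoint(ckpt_path)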
@@ -1186,6 +1201,6 @@ class TimeSeries():
                 self.model = self.model.load_from_checkpoint(tmp_path,verbose=self.verbose,)
             else:
                 self.model = self.model.__class__.load_from_checkpoint(tmp_path,verbose=self.verbose,)
-
+            self.model.to(torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))
         except Exception as e:
             beauty_string(f'There is a problem loading the weights on file {tmp_path} {e}','section',self.verbose)
dsipts/models/Autoformer.py
CHANGED
@@ -148,7 +148,8 @@ class Autoformer(Base):
             projection=nn.Linear(d_model, self.out_channels*self.mul, bias=True)
         )
         self.projection = nn.Linear(self.past_channels,self.out_channels*self.mul )
-
+    def can_be_compiled(self):
+        return True
     def forward(self, batch):


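Each model now declares whether it is safe to hand to `torch.compile`: among the hunks shown, Autoformer, DilatedConvED, ITransformer, LinearTS, PatchTST, Simple and TTM return True, while Diffusion, Duet and TIDE return False. A hedged sketch of the contract; the conservative base-class default below is an assumption, it is not part of this diff:

    class Base:                          # stand-in for dsipts' Base model class
        def can_be_compiled(self) -> bool:
            return False                 # assumed safe default: skip torch.compile

    class Autoformer(Base):
        def can_be_compiled(self) -> bool:
            return True                  # opts in to compilation

    class Duet(Base):
        def can_be_compiled(self) -> bool:
            return False                 # expert dispatch gave dynamo trouble (see duet/layers.py below)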
dsipts/models/CrossFormer.py
CHANGED
dsipts/models/D3VAE.py
CHANGED
dsipts/models/Diffusion.py
CHANGED
@@ -425,6 +425,9 @@ class Diffusion(Base):
         loss = self.compute_loss(batch,out)
         return loss

+    def can_be_compiled(self):
+        return False
+
     # function to concat embedded categorical variables
     def cat_categorical_vars(self, batch:dict):
         """Extracting categorical context about past and future
dsipts/models/DilatedConv.py
CHANGED
dsipts/models/DilatedConvED.py
CHANGED
@@ -228,7 +228,8 @@ class DilatedConvED(Base):
             nn.BatchNorm1d(hidden_RNN) if use_bn else nn.Dropout(dropout_rate) ,
             Permute() if use_bn else nn.Identity() ,
             nn.Linear(hidden_RNN ,self.mul))
-
+    def can_be_compiled(self):
+        return True


     def forward(self, batch):
dsipts/models/Duet.py
CHANGED
@@ -136,7 +136,8 @@ class Duet(Base):
             activation(),
             nn.Linear(dim*2,self.out_channels*self.mul ))

-
+    def can_be_compiled(self):
+        return False
     def forward(self, batch:dict)-> float:
         # x: [Batch, Input length, Channel]
         x_enc = batch['x_num_past'].to(self.device)
dsipts/models/ITransformer.py
CHANGED
@@ -101,6 +101,9 @@ class ITransformer(Base):
         )
         self.projector = nn.Linear(d_model, self.future_steps*self.mul, bias=True)

+    def can_be_compiled(self):
+        return True
+
     def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
         if self.use_norm:
             # Normalization from Non-stationary Transformer
dsipts/models/Informer.py
CHANGED
dsipts/models/LinearTS.py
CHANGED
@@ -143,7 +143,8 @@ class LinearTS(Base):
             activation(),
             nn.BatchNorm1d(hidden_size//8) if use_bn else nn.Dropout(dropout_rate) ,
             nn.Linear(hidden_size//8,self.future_steps*self.mul)))
-
+    def can_be_compiled(self):
+        return True
     def forward(self, batch):

         x = batch['x_num_past'].to(self.device)
dsipts/models/PatchTST.py
CHANGED
@@ -133,6 +133,9 @@ class PatchTST(Base):

         #self.final_linear = nn.Sequential(nn.Linear(past_channels,past_channels//2),activation(),nn.Dropout(dropout_rate), nn.Linear(past_channels//2,out_channels) )

+    def can_be_compiled(self):
+        return True
+
     def forward(self, batch): # x: [Batch, Input length, Channel]


dsipts/models/RNN.py
CHANGED
dsipts/models/Samformer.py
CHANGED
dsipts/models/Simple.py
CHANGED
@@ -67,7 +67,9 @@ class Simple(Base):
         self.linear = (nn.Sequential(nn.Linear(emb_past_out_channel*self.past_steps+emb_fut_out_channel*self.future_steps+self.past_steps*self.past_channels+self.future_channels*self.future_steps,hidden_size),
             activation(),nn.Dropout(dropout_rate),
             nn.Linear(hidden_size,self.out_channels*self.future_steps*self.mul)))
-
+    def can_be_compiled(self):
+        return True
+
     def forward(self, batch):

         x = batch['x_num_past'].to(self.device)
dsipts/models/TFT.py
CHANGED
dsipts/models/TIDE.py
CHANGED
@@ -106,7 +106,10 @@ class TIDE(Base):

         # linear for Y lookback
         self.linear_target = nn.Linear(self.past_steps*self.out_channels, self.future_steps*self.out_channels*self.mul)
-
+
+    def can_be_compiled(self):
+        return False
+

     def forward(self, batch:dict)-> float:
         """training process of the diffusion network
dsipts/models/TTM.py
CHANGED
@@ -12,7 +12,7 @@ except:
     from .base import Base


-from .ttm.utils import get_model, get_frequency_token, count_parameters
+from .ttm.utils import get_model, get_frequency_token, count_parameters, DEFAULT_FREQUENCY_MAPPING
 from ..data_structure.utils import beauty_string
 from .utils import get_scope

@@ -43,6 +43,7 @@ class TTM(Base):
         super().__init__(**kwargs)
         self.save_hyperparameters(logger=False)

+

         self.index_fut = list(exogenous_channel_indices_cont)

@@ -52,6 +53,14 @@ class TTM(Base):
         self.index_fut_cat = []
         self.freq = freq

+        base_freq_token = get_frequency_token(self.freq)  # e.g., shape [n_token] or scalar
+        # ensure it's a tensor of integer type
+        if not torch.is_tensor(base_freq_token):
+            base_freq_token = torch.tensor(base_freq_token)
+        base_freq_token = base_freq_token.long()
+        self.register_buffer("token", base_freq_token, persistent=True)
+
+
         self.model = get_model(
             model_path=model_path,
             context_length=self.past_steps,
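The frequency token is now resolved once in `__init__` and stored with `register_buffer`, so `forward` no longer calls `get_frequency_token` on every batch (the old call is commented out later in the diff and flagged as a dynamo problem). A buffer travels with the module across devices, is saved in the state dict, and is an ordinary tensor that `torch.compile` can trace. A small self-contained sketch of the same idea (the module and token id are illustrative, not part of dsipts):

    import torch
    import torch.nn as nn

    class FreqTokenHolder(nn.Module):
        def __init__(self, token_id: int):
            super().__init__()
            token = torch.tensor(token_id).long()
            self.register_buffer("token", token, persistent=True)

        def forward(self, past_values: torch.Tensor) -> torch.Tensor:
            # one token per sample in the batch, mirroring self.token.repeat(batch_size)
            return self.token.repeat(past_values.shape[0])

    holder = FreqTokenHolder(token_id=3)
    print(holder(torch.zeros(8, 16, 2)).shape)  # torch.Size([8])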
@@ -98,14 +107,14 @@ class TTM(Base):
                 input[:,:,i] = input[:, :, i] / (e-1)
         return input

-
+    def can_be_compiled(self):
+        return True
+
     def forward(self, batch):
-        x_enc = batch['x_num_past']
+        x_enc = batch['x_num_past'].to(self.device)
         original_indexes = batch['idx_target'][0].tolist()


-
-
         if 'x_cat_past' in batch.keys():
             x_mark_enc = batch['x_cat_past'].to(torch.float32).to(self.device)
             x_mark_enc = self._scaler_past(x_mark_enc)
@@ -113,7 +122,7 @@ class TTM(Base):
         else:
             past_values = x_enc

-        future_values = torch.zeros_like(past_values)
+        future_values = torch.zeros_like(past_values).to(self.device)
         future_values = future_values[:,:self.future_steps,:]

         if 'x_num_future' in batch.keys():
@@ -124,8 +133,12 @@ class TTM(Base):
             future_values[:,:,self.index_cat_fut] = x_mark_dec


-        #investigating!!
-        freq_token = get_frequency_token(self.freq).repeat(past_values.shape[0])
+        #investigating!! problem with dynamo!
+        #freq_token = get_frequency_token(self.freq).repeat(past_values.shape[0])
+
+        batch_size = past_values.shape[0]
+        freq_token = self.token.repeat(batch_size).long().to(self.device)
+

         res = self.model(
             past_values= past_values,
@@ -134,7 +147,7 @@ class TTM(Base):
             future_observed_mask = None,
             output_hidden_states = False,
             return_dict = False,
-            freq_token= freq_token, ##investigating
+            freq_token= freq_token,#[0:past_values.shape[0]], ##investigating
             static_categorical_values = None
         )

dsipts/models/TimeXER.py
CHANGED
dsipts/models/base_v2.py
CHANGED
@@ -307,7 +307,7 @@ class Base(pl.LightningModule):
         self.train_epoch_count +=1
         return loss

-
+
     def validation_step(self, batch, batch_idx):
         """
         pythotrch lightening stuff
@@ -320,15 +320,14 @@ class Base(pl.LightningModule):
         else:
             y_hat = self(batch)
         score = 0
-
+        #log_this_batch = (batch_idx == 0) and (self.count_epoch % int(max(self.trainer.max_epochs / 100,1)) == 1)

+        #if log_this_batch:
         #track the predictions! We can do better than this but maybe it is better to firstly update pytorch-lightening
-
-
-
-
-            "y_hat": y_hat.detach().cpu()
-        })
+        self._val_outputs=[{
+            "y": batch['y'].detach().cpu(),
+            "y_hat": y_hat.detach().cpu()
+        }]
         self.validation_epoch_metrics+= (self.compute_loss(batch,y_hat)+score).detach()
         self.validation_epoch_count+=1
         return None
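`validation_step` now caches the last batch's targets and predictions itself, as detached CPU tensors in `self._val_outputs`, rather than relying on Lightning to collect step outputs. A skeleton of the pattern, assuming a matching `on_validation_epoch_end` hook that is not part of this hunk:

    import pytorch_lightning as pl

    class TrackLastValBatch(pl.LightningModule):   # skeleton, not the dsipts Base class
        def validation_step(self, batch, batch_idx):
            y_hat = self(batch)
            # detach + move to CPU so no autograd graph or GPU memory is retained
            self._val_outputs = [{
                "y": batch["y"].detach().cpu(),
                "y_hat": y_hat.detach().cpu(),
            }]

        def on_validation_epoch_end(self):
            out = getattr(self, "_val_outputs", None)
            if out:
                err = (out[0]["y"] - out[0]["y_hat"]).abs().mean()
                self.log("val_last_batch_mae", err)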
dsipts/models/duet/layers.py
CHANGED
@@ -219,7 +219,7 @@ class SparseDispatcher(object):
         # expand according to batch index so we can just split by _part_sizes
         inp_exp = inp[self._batch_index].squeeze(1)
         return torch.split(inp_exp, self._part_sizes, dim=0)
-
+
     def combine(self, expert_out, multiply_by_gates=True):
         """Sum together the expert output, weighted by the gates.
         The slice corresponding to a particular batch element `b` is computed
@@ -234,7 +234,9 @@ class SparseDispatcher(object):
         a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
         """
         # apply exp to expert outputs, so we are not longer in log space
+
         stitched = torch.cat(expert_out, 0)
+
         if multiply_by_gates:
             # stitched = stitched.mul(self._nonzero_gates)
             stitched = torch.einsum("i...,ij->i...", stitched, self._nonzero_gates)
@@ -430,9 +432,11 @@ class Linear_extractor_cluster(nn.Module):
         expert_inputs = dispatcher.dispatch(x_norm)

         gates = dispatcher.expert_to_gates()
+
         expert_outputs = [
             self.experts[i](expert_inputs[i]) for i in range(self.num_experts)
         ]
+        #y = dispatcher.combine([e for e in expert_outputs if len(e)>0])
+        #with torch._dynamo.disable():
         y = dispatcher.combine(expert_outputs)
-
         return y, loss
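The commented-out `torch._dynamo.disable()` line above hints at the escape hatch that would be used if the expert-combine step ever had to run eagerly inside an otherwise compiled model. A hedged sketch, assuming PyTorch ≥ 2.1 where `torch.compiler.disable` is the public spelling; the helper name is illustrative:

    import torch

    # Run this helper eagerly even when it is called from compiled code.
    @torch.compiler.disable
    def combine_eager(dispatcher, expert_outputs):
        return dispatcher.combine(expert_outputs)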
{dsipts-1.1.11.dist-info → dsipts-1.1.12.dist-info}/RECORD
CHANGED
@@ -3,33 +3,33 @@ dsipts/data_management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 dsipts/data_management/monash.py,sha256=aZxq9FbIH6IsU8Lwou1hAokXjgOAK-wdl2VAeFg2k4M,13075
 dsipts/data_management/public_datasets.py,sha256=yXFzOZZ-X0ZG1DoqVU-zFmEGVMc2033YDQhRgYxY8ws,6793
 dsipts/data_structure/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dsipts/data_structure/data_structure.py,sha256=
+dsipts/data_structure/data_structure.py,sha256=KVkjTVjc7NznJIou4LYGzMbzE7ye-K3ll65GEgn2qKg,60814
 dsipts/data_structure/modifiers.py,sha256=qlry9dfw8pEE0GrvgwROZJkJ6oPpUnjEHPIG5qIetss,7948
 dsipts/data_structure/utils.py,sha256=QwfKPZgSy6DIw5n6ztOdPJIAnzo4EnlMTgRbpiWnyko,6593
-dsipts/models/Autoformer.py,sha256=
-dsipts/models/CrossFormer.py,sha256=
-dsipts/models/D3VAE.py,sha256=
-dsipts/models/Diffusion.py,sha256=
-dsipts/models/DilatedConv.py,sha256=
-dsipts/models/DilatedConvED.py,sha256=
-dsipts/models/Duet.py,sha256=
-dsipts/models/ITransformer.py,sha256=
-dsipts/models/Informer.py,sha256=
-dsipts/models/LinearTS.py,sha256=
-dsipts/models/PatchTST.py,sha256=
+dsipts/models/Autoformer.py,sha256=nUQvPC_qtajLT1AHdNJmF_P3ZL01j3spkZ4ubxdGF3g,8497
+dsipts/models/CrossFormer.py,sha256=ClW6H_hrtLJH0iqTC7q_ya_Bwc_Xu-0lpAN5w2DSUYk,6526
+dsipts/models/D3VAE.py,sha256=d1aY6kGjBSxZncN-KPWpdUGunu182ng2QFInGFrKYQM,6903
+dsipts/models/Diffusion.py,sha256=owst4IxA3hkEEIrn5K-zwAYWUzEhouiRPwM4nTLcyoE,40786
+dsipts/models/DilatedConv.py,sha256=TMDzd_cNgCZa6YusVVVGbTGGH3YlMz0IZZ9ZxRrJ3i4,14334
+dsipts/models/DilatedConvED.py,sha256=KwG83yHqoEx_Vmea69zTPsSP1-0GdOUrtXwvhNDuWj8,14048
+dsipts/models/Duet.py,sha256=m67PStuYE6vkFUFUofBrrLryx1ZUZropyVGcu_ygOx8,7681
+dsipts/models/ITransformer.py,sha256=2WXqqEvnWH2DqRQyXfGm4Eg4_q32GFy2XnNeoTl-KmY,7310
+dsipts/models/Informer.py,sha256=gxCdU2KkNhadyMujBA5A0eP6SPN4Q0IkEIogLYwvz5k,6970
+dsipts/models/LinearTS.py,sha256=vXaGpbbkfdpzpTEWZ1hs6QI6j3vDvevD3SyKQXo6Sdg,9151
+dsipts/models/PatchTST.py,sha256=1O09cPMg8USdkt5q6szTiz5dIY45kizsf6gt6vLKnQo,9119
 dsipts/models/Persistent.py,sha256=URwyaBb0M7zbPXSGMImtHlwC9XCy-OquFCwfWvn3P70,1249
-dsipts/models/RNN.py,sha256=
-dsipts/models/Samformer.py,sha256=
-dsipts/models/Simple.py,sha256=
-dsipts/models/TFT.py,sha256=
-dsipts/models/TIDE.py,sha256=
-dsipts/models/TTM.py,sha256=
-dsipts/models/TimeXER.py,sha256=
+dsipts/models/RNN.py,sha256=RnsRDAQ2z5-XNaJVZd6Q7z23WvPR2uLVdi7BNQyF7QE,9685
+dsipts/models/Samformer.py,sha256=Kt7B9ID3INtFDAVKIM1LTly5-UfKCaVZ9uxAJmYv6B4,5606
+dsipts/models/Simple.py,sha256=8wRSO-gh_Z6Sl8fYMV-RIXIL0RrO5u5dDtsaq-OsKg0,3960
+dsipts/models/TFT.py,sha256=JiI90ikfP8aaR_rtczu8CyGMNLTgml13aYQifgIC_yo,13888
+dsipts/models/TIDE.py,sha256=S1KlKqFOR3jJ9DDiTqeaKvya9hYBsNHBVqwJsYX3FLU,13094
+dsipts/models/TTM.py,sha256=lOOo5dR5nOmf37cND6C8ft8TVl0kzNeraIuABw7eI5g,5897
+dsipts/models/TimeXER.py,sha256=EkmlHfT2RegY6Ce6q8EUEV1a_WZ6SkYibnOZXqsyd_8,7111
 dsipts/models/VQVAEA.py,sha256=sNJi8UZh-10qEIKcZK3SzhlOFUUjvqjoglzeZBFaeZM,13789
 dsipts/models/VVA.py,sha256=BnPkJ0Nzue0oShSHZVRNlf5RvT0Iwtf9bx19vLB9Nn0,11939
 dsipts/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dsipts/models/base.py,sha256=Gqsycy8ZXGaIVx9vvmYRpBCqdUxGE4tvC5ltgxlpEYY,19640
-dsipts/models/base_v2.py,sha256=
+dsipts/models/base_v2.py,sha256=03cueZExRhkJyBVIHuUPB8sjsCd5Go1HJAR81CADg-c,19896
 dsipts/models/utils.py,sha256=kjTwyktNCFMpPUy6zoleBCSKlvMvK_Jkgyh2T1OXg3E,24497
 dsipts/models/autoformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dsipts/models/autoformer/layers.py,sha256=xHt8V1lKdD1cIvgxXdDbI_EqOz4zgOQ6LP8l7M1pAxM,13276
@@ -47,7 +47,7 @@ dsipts/models/d3vae/neural_operations.py,sha256=C70kUtQ0ox9MeXBdu4rPDqt022_hVtcN
 dsipts/models/d3vae/resnet.py,sha256=3bnlrEBM2DGiAJV8TeSv2tm27Gm-_P6hee41t8QQFL8,5520
 dsipts/models/d3vae/utils.py,sha256=fmUsE_67uwizjeR1_pDdsndyQddbqt27Lv31XBEn-gw,23798
 dsipts/models/duet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dsipts/models/duet/layers.py,sha256=
+dsipts/models/duet/layers.py,sha256=TTrhlfSwIXE_7gO9rsdKJD9Bdy3B_JJPCo8vYZJ8Fvg,18258
 dsipts/models/duet/masked.py,sha256=lkdAB5kwAgV7QfBSVP_QeDr_mB09Rz4302p-KwZpUV4,7111
 dsipts/models/informer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dsipts/models/informer/attn.py,sha256=ghrQGfAqt-Z_7qU5D_aixobmwk6pBKMLAdaNfg-QZbo,6839
@@ -76,7 +76,7 @@ dsipts/models/vva/minigpt.py,sha256=bg0JddqSD322uxSGexen3nPXL_hGTsk3vNLR62d7-w8,
 dsipts/models/vva/vqvae.py,sha256=RzCQ_M9xBprp7_x20dSV3EQqlO0FjPUGWV-qdyKrQsM,19680
 dsipts/models/xlstm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dsipts/models/xlstm/xLSTM.py,sha256=ZKZZmffmIq1Vb71CR4GSyM8viqVx-u0FChxhcNgHub8,10081
-dsipts-1.1.
-dsipts-1.1.
-dsipts-1.1.
-dsipts-1.1.
+dsipts-1.1.12.dist-info/METADATA,sha256=nxE2kAg9RvG5Py27sMNbQ-mUIu9mtZrDo2WocLpJdQ4,24795
+dsipts-1.1.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dsipts-1.1.12.dist-info/top_level.txt,sha256=i6o0rf5ScFwZK21E89dSKjVNjUBkrEQpn0-Vij43748,7
+dsipts-1.1.12.dist-info/RECORD,,
File without changes
File without changes