dsipts-1.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- dsipts/__init__.py +48 -0
- dsipts/data_management/__init__.py +0 -0
- dsipts/data_management/monash.py +338 -0
- dsipts/data_management/public_datasets.py +162 -0
- dsipts/data_structure/__init__.py +0 -0
- dsipts/data_structure/data_structure.py +1167 -0
- dsipts/data_structure/modifiers.py +213 -0
- dsipts/data_structure/utils.py +173 -0
- dsipts/models/Autoformer.py +199 -0
- dsipts/models/CrossFormer.py +152 -0
- dsipts/models/D3VAE.py +196 -0
- dsipts/models/Diffusion.py +818 -0
- dsipts/models/DilatedConv.py +342 -0
- dsipts/models/DilatedConvED.py +310 -0
- dsipts/models/Duet.py +197 -0
- dsipts/models/ITransformer.py +167 -0
- dsipts/models/Informer.py +180 -0
- dsipts/models/LinearTS.py +222 -0
- dsipts/models/PatchTST.py +181 -0
- dsipts/models/Persistent.py +44 -0
- dsipts/models/RNN.py +213 -0
- dsipts/models/Samformer.py +139 -0
- dsipts/models/TFT.py +269 -0
- dsipts/models/TIDE.py +296 -0
- dsipts/models/TTM.py +252 -0
- dsipts/models/TimeXER.py +184 -0
- dsipts/models/VQVAEA.py +299 -0
- dsipts/models/VVA.py +247 -0
- dsipts/models/__init__.py +0 -0
- dsipts/models/autoformer/__init__.py +0 -0
- dsipts/models/autoformer/layers.py +352 -0
- dsipts/models/base.py +439 -0
- dsipts/models/base_v2.py +444 -0
- dsipts/models/crossformer/__init__.py +0 -0
- dsipts/models/crossformer/attn.py +118 -0
- dsipts/models/crossformer/cross_decoder.py +77 -0
- dsipts/models/crossformer/cross_embed.py +18 -0
- dsipts/models/crossformer/cross_encoder.py +99 -0
- dsipts/models/d3vae/__init__.py +0 -0
- dsipts/models/d3vae/diffusion_process.py +169 -0
- dsipts/models/d3vae/embedding.py +108 -0
- dsipts/models/d3vae/encoder.py +326 -0
- dsipts/models/d3vae/model.py +211 -0
- dsipts/models/d3vae/neural_operations.py +314 -0
- dsipts/models/d3vae/resnet.py +153 -0
- dsipts/models/d3vae/utils.py +630 -0
- dsipts/models/duet/__init__.py +0 -0
- dsipts/models/duet/layers.py +438 -0
- dsipts/models/duet/masked.py +202 -0
- dsipts/models/informer/__init__.py +0 -0
- dsipts/models/informer/attn.py +185 -0
- dsipts/models/informer/decoder.py +50 -0
- dsipts/models/informer/embed.py +125 -0
- dsipts/models/informer/encoder.py +100 -0
- dsipts/models/itransformer/Embed.py +142 -0
- dsipts/models/itransformer/SelfAttention_Family.py +355 -0
- dsipts/models/itransformer/Transformer_EncDec.py +134 -0
- dsipts/models/itransformer/__init__.py +0 -0
- dsipts/models/patchtst/__init__.py +0 -0
- dsipts/models/patchtst/layers.py +569 -0
- dsipts/models/samformer/__init__.py +0 -0
- dsipts/models/samformer/utils.py +154 -0
- dsipts/models/tft/__init__.py +0 -0
- dsipts/models/tft/sub_nn.py +234 -0
- dsipts/models/timexer/Layers.py +127 -0
- dsipts/models/timexer/__init__.py +0 -0
- dsipts/models/ttm/__init__.py +0 -0
- dsipts/models/ttm/configuration_tinytimemixer.py +307 -0
- dsipts/models/ttm/consts.py +16 -0
- dsipts/models/ttm/modeling_tinytimemixer.py +2099 -0
- dsipts/models/ttm/utils.py +438 -0
- dsipts/models/utils.py +624 -0
- dsipts/models/vva/__init__.py +0 -0
- dsipts/models/vva/minigpt.py +83 -0
- dsipts/models/vva/vqvae.py +459 -0
- dsipts/models/xlstm/__init__.py +0 -0
- dsipts/models/xlstm/xLSTM.py +255 -0
- dsipts-1.1.5.dist-info/METADATA +31 -0
- dsipts-1.1.5.dist-info/RECORD +81 -0
- dsipts-1.1.5.dist-info/WHEEL +5 -0
- dsipts-1.1.5.dist-info/top_level.txt +1 -0
dsipts/models/CrossFormer.py
ADDED

@@ -0,0 +1,152 @@

## Copyright 2023 Yunhao Zhang and Junchi Yan (https://github.com/Thinklab-SJTU/Crossformer?tab=Apache-2.0-1-ov-file#readme)
## Code modified to align the notation and the batch generation
## The same applies to everything in the crossformer folder


from torch import nn
import torch

try:
    import lightning.pytorch as pl
    from .base_v2 import Base
    OLD_PL = False
except ImportError:
    import pytorch_lightning as pl
    OLD_PL = True
    from .base import Base
from typing import List, Union
from einops import repeat
from math import ceil
from ..data_structure.utils import beauty_string
from .utils import get_scope
from .utils import get_activation
from .utils import Embedding_cat_variables
from .crossformer.cross_encoder import Encoder
from .crossformer.cross_decoder import Decoder
from .crossformer.cross_embed import DSW_embedding


class CrossFormer(Base):
    handle_multivariate = True
    handle_future_covariates = True
    handle_categorical_variables = True
    handle_quantile_loss = True

    description = get_scope(handle_multivariate, handle_future_covariates, handle_categorical_variables, handle_quantile_loss)

    def __init__(self,
                 d_model: int,
                 hidden_size: int,
                 n_head: int,
                 seg_len: int,
                 n_layer_encoder: int,
                 win_size: int,
                 factor: int = 10,
                 dropout_rate: float = 0.1,
                 activation: str = 'torch.nn.ReLU',
                 **kwargs) -> None:
        """CrossFormer (https://openreview.net/forum?id=vSVLM2j9eie)

        Args:
            d_model (int): The dimensionality of the model.
            hidden_size (int): The size of the hidden layers.
            n_head (int): The number of attention heads.
            seg_len (int): The length of the segments.
            n_layer_encoder (int): The number of layers in the encoder.
            win_size (int): The size of the window for attention.
            factor (int, optional): See .crossformer.attn.TwoStageAttentionLayer. Defaults to 10.
            dropout_rate (float, optional): The dropout rate. Defaults to 0.1.
            activation (str, optional): The activation function to use. Defaults to 'torch.nn.ReLU'.
            **kwargs: Additional keyword arguments for the parent class.

        Returns:
            None: This method does not return a value.

        Raises:
            ValueError: If the activation function is not recognized.
        """

        if isinstance(activation, str):
            activation = get_activation(activation)
        super().__init__(**kwargs)
        self.save_hyperparameters(logger=False)

        # The padding operation handles input lengths that are not a multiple
        # of the segment length
        self.pad_past_steps = ceil(1.0 * self.past_steps / seg_len) * seg_len
        self.pad_future_steps = ceil(1.0 * self.future_steps / seg_len) * seg_len
        self.past_steps_add = self.pad_past_steps - self.past_steps
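        # e.g. past_steps=100, seg_len=12 -> pad_past_steps = 9*12 = 108, so
        # past_steps_add = 8 timesteps must be prepended before segmenting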

        # Embedding
        self.emb_past = Embedding_cat_variables(self.past_steps, self.emb_dim, self.embs_past, reduction_mode=self.reduction_mode, use_classical_positional_encoder=self.use_classical_positional_encoder, device=self.device)
        self.emb_fut = Embedding_cat_variables(self.future_steps, self.emb_dim, self.embs_fut, reduction_mode=self.reduction_mode, use_classical_positional_encoder=self.use_classical_positional_encoder, device=self.device)
        emb_past_out_channel = self.emb_past.output_channels
        emb_fut_out_channel = self.emb_fut.output_channels
        self.enc_value_embedding = DSW_embedding(seg_len, d_model)
        self.enc_pos_embedding = nn.Parameter(torch.randn(1, self.past_channels + emb_past_out_channel, (self.pad_past_steps // seg_len), d_model))
        self.pre_norm = nn.LayerNorm(d_model)

        ## Custom embeddings (these are not used in the original crossformer)

        # Encoder
        self.encoder = Encoder(n_layer_encoder, win_size, d_model, n_head, hidden_size, block_depth=1,
                               dropout=dropout_rate, in_seg_num=(self.pad_past_steps // seg_len), factor=factor)

        # Decoder
        self.dec_pos_embedding = nn.Parameter(torch.randn(1, self.past_channels + emb_past_out_channel, (self.pad_future_steps // seg_len), d_model))
        self.decoder = Decoder(seg_len, n_layer_encoder + 1, d_model, n_head, hidden_size, dropout_rate,
                               out_seg_num=(self.pad_future_steps // seg_len), factor=factor)

        dim = self.past_channels + emb_past_out_channel + emb_fut_out_channel + self.future_channels
        self.final_layer = nn.Sequential(activation(),
                                         nn.Linear(dim, dim // 2),
                                         activation(),
                                         nn.Linear(dim // 2, self.mul * self.out_channels))
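
        # final_layer maps the concatenated features to mul values per output
        # channel and timestep; given handle_quantile_loss above, mul presumably
        # counts the requested quantiles (see the reshape at the end of forward)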

    def forward(self, batch):

        x_seq = batch['x_num_past'].to(self.device)  # [:,:,idx_target]
        BS = x_seq.shape[0]

        if 'x_cat_future' in batch.keys():
            emb_fut = self.emb_fut(BS, batch['x_cat_future'].to(self.device))
        else:
            emb_fut = self.emb_fut(BS, None)
        if 'x_cat_past' in batch.keys():
            emb_past = self.emb_past(BS, batch['x_cat_past'].to(self.device))
        else:
            emb_past = self.emb_past(BS, None)

        tmp_future = [emb_fut]

        if 'x_num_future' in batch.keys():
            x_future = batch['x_num_future'].to(self.device)
            tmp_future.append(x_future)

        x_seq = torch.cat([x_seq, emb_past], 2)
        batch_size = x_seq.shape[0]
        if (self.past_steps_add != 0):
            x_seq = torch.cat((x_seq[:, :1, :].expand(-1, self.past_steps_add, -1), x_seq), dim=1)
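        # (the pad replicates the first timestep at the front of the window,
        # so the padded history is a whole number of segments)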
        x_seq = self.enc_value_embedding(x_seq)
        x_seq += self.enc_pos_embedding
        x_seq = self.pre_norm(x_seq)
        enc_out = self.encoder(x_seq)
        dec_in = repeat(self.dec_pos_embedding, 'b ts_d l d -> (repeat b) ts_d l d', repeat=batch_size)
        predict_y = self.decoder(dec_in, enc_out)
        res = predict_y[:, :self.future_steps, :]
        tmp_future.append(res)
        res = self.final_layer(torch.cat(tmp_future, 2))
        return res.reshape(BS, -1, self.out_channels, self.mul)
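As a side note, here is a minimal self-contained sketch (plain PyTorch, not part of the package) of the pad-then-segment step that forward performs before attention: the past window is left-padded by repeating the first timestep up to a multiple of seg_len, then each channel is split into segments projected to d_model, mirroring what DSW_embedding does. All sizes below are made up for illustration.

import torch
from math import ceil

past_steps, channels, seg_len, d_model = 100, 7, 12, 64
pad_past_steps = ceil(1.0 * past_steps / seg_len) * seg_len   # 108
past_steps_add = pad_past_steps - past_steps                  # 8

x = torch.randn(4, past_steps, channels)                      # (B, L, C)
# left-pad by repeating the first timestep, as in CrossFormer.forward
x = torch.cat((x[:, :1, :].expand(-1, past_steps_add, -1), x), dim=1)

# DSW-style embedding: split each channel into segments, project to d_model
B, L, C = x.shape
segments = x.reshape(B, L // seg_len, seg_len, C).permute(0, 3, 1, 2)  # (B, C, n_seg, seg_len)
tokens = torch.nn.Linear(seg_len, d_model)(segments)                   # (B, C, n_seg, d_model)
print(tokens.shape)  # torch.Size([4, 7, 9, 64])
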
dsipts/models/D3VAE.py
ADDED

@@ -0,0 +1,196 @@

## Copyright https://github.com/PaddlePaddle/PaddleSpatial
## Modified for notation alignment and batch structure
## The same applies to everything inside the d3vae folder

from torch import nn, optim
import torch

try:
    import lightning.pytorch as pl
    from .base_v2 import Base
    OLD_PL = False
except ImportError:
    import pytorch_lightning as pl
    OLD_PL = True
    from .base import Base
from typing import Union
from .d3vae.model import diffusion_generate, denoise_net, pred_net
from .utils import Embedding_cat_variables

from torch.optim.lr_scheduler import StepLR


def copy_parameters(
    net_source: torch.nn.Module,
    net_dest: torch.nn.Module,
    strict=True,
) -> None:
    """
    Copies parameters from one network to another.

    Parameters
    ----------
    net_source
        Input network.
    net_dest
        Output network.
    strict:
        whether to strictly enforce that the keys
        in :attr:`state_dict` match the keys returned by this module's
        :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
    """

    net_dest.load_state_dict(net_source.state_dict(), strict=strict)


class D3VAE(Base):

    def __init__(self,
                 scale=0.1,
                 hidden_size=64,
                 num_layers=2,
                 dropout_rate=0.1,
                 diff_steps=200,
                 loss_type='kl',
                 beta_end=0.01,
                 beta_schedule='linear',
                 channel_mult=2,
                 mult=1,
                 num_preprocess_blocks=1,
                 num_preprocess_cells=3,
                 num_channels_enc=16,
                 arch_instance='res_mbconv',
                 num_latent_per_group=6,
                 num_channels_dec=16,
                 groups_per_scale=2,
                 num_postprocess_blocks=1,
                 num_postprocess_cells=2,
                 beta_start=0,
                 freq='h',
                 **kwargs
                 ) -> None:
        super().__init__(**kwargs)
        input_dim = self.past_channels
        sequence_length = self.past_steps
        prediction_length = self.future_steps
        target_dim = self.out_channels
        embedding_dimension = self.emb_dim
        ## PyTorch Lightning stuff
        self.save_hyperparameters(logger=False)

        self.gen_net = diffusion_generate(target_dim, embedding_dimension, prediction_length, sequence_length, scale, hidden_size, num_layers, dropout_rate, diff_steps, loss_type, beta_end, beta_schedule, channel_mult, mult,
                                          num_preprocess_blocks, num_preprocess_cells, num_channels_enc, arch_instance, num_latent_per_group, num_channels_dec, groups_per_scale, num_postprocess_blocks, num_postprocess_cells).to(self.device)

        self.denoise_net = denoise_net(target_dim, embedding_dimension, prediction_length, sequence_length, scale, hidden_size, num_layers, dropout_rate, diff_steps, loss_type, beta_end, beta_schedule, channel_mult, mult,
                                       num_preprocess_blocks, num_preprocess_cells, num_channels_enc, arch_instance, num_latent_per_group, num_channels_dec, groups_per_scale, num_postprocess_blocks, num_postprocess_cells, beta_start, input_dim, freq, self.embs_past).to(self.device)
        self.diff_step = diff_steps
        self.pred_net = pred_net(target_dim, embedding_dimension, prediction_length, sequence_length, scale, hidden_size, num_layers, dropout_rate, diff_steps, loss_type, beta_end, beta_schedule, channel_mult, mult,
                                 num_preprocess_blocks, num_preprocess_cells, num_channels_enc, arch_instance, num_latent_per_group, num_channels_dec, groups_per_scale, num_postprocess_blocks, num_postprocess_cells, beta_start, input_dim, freq, self.embs_fut).to(self.device)
        # self.embedding = DataEmbedding(input_dim, embedding_dimension, freq, dropout_rate)

        self.psi = 0.5
        self.gamma = 0.01
        self.lambda1 = 1.0

    def configure_optimizers(self):
        """
        Each model has optim_config and scheduler_config

        :meta private:
        """
        optimizer = optim.Adam(self.denoise_net.parameters(), **self.optim_config)
        self.lr = self.optim_config['lr']
        if self.scheduler_config is not None:
            scheduler = StepLR(optimizer, **self.scheduler_config)
            return [optimizer], [scheduler]
        else:
            return optimizer
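
    # Illustrative (hypothetical) configs consumed above:
    #   optim_config     = {'lr': 5e-4}                      -> Adam(denoise_net.parameters(), lr=5e-4)
    #   scheduler_config = {'step_size': 50, 'gamma': 0.75}  -> StepLR(optimizer, step_size=50, gamma=0.75)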

    def training_step(self, batch, batch_idx):
        """
        PyTorch Lightning stuff

        :meta private:
        """
        sample, y_noisy, recon, loss2, total_c = self(batch)
        ## self.compute_loss(batch, y_hat)
        mse_loss = self.loss(sample, y_noisy)
        loss1 = -torch.mean(torch.sum(recon, dim=[1, 2, 3]))
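        # Combined objective: negative reconstruction log-likelihood (loss1,
        # weighted by psi), the extra term returned by denoise_net (loss2,
        # weighted by lambda1), the MSE between the sample and the noisy
        # target, minus the total-correlation term total_c scaled by gamma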
        loss = loss1 * self.psi + loss2 * self.lambda1 + mse_loss - self.gamma * total_c

        return loss

    def validation_step(self, batch, batch_idx):
        """
        PyTorch Lightning stuff

        :meta private:
        """

        copy_parameters(self.denoise_net, self.pred_net)
        batch_x = batch['x_num_past'].to(self.device)
        batch_x_mark = batch['x_cat_past'].to(self.device)
        batch_y = batch['y'].to(self.device)

        # import pdb
        # pdb.set_trace()
        _, out, _, _ = self.pred_net(batch_x, batch_x_mark)
        mse = self.loss(out.squeeze(1), batch_y)

        return mse

    def forward(self, batch: dict) -> torch.tensor:

        B = batch['x_num_past'].shape[0]

        t = torch.randint(0, self.diff_step, (B,)).long().to(self.device)
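        # t: one uniformly drawn diffusion timestep per batch element, passed
        # to denoise_net together with the target (which it noises to that step)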

        batch_x = batch['x_num_past'].to(self.device)
        x_mark = batch['x_cat_past'].to(self.device)
        batch_y = batch['y'].to(self.device)

        output, y_noisy, total_c, _, loss2 = self.denoise_net(batch_x, x_mark, batch_y, t)
        recon = output.log_prob(y_noisy)
        sample = output.sample()

        return sample, y_noisy, recon, loss2, total_c

    def inference(self, batch: dict) -> torch.tensor:
        """Care here: we need to implement this because predicting step N uses
        the prediction at step N-1. TODO: fix, because the presence of known
        continuous variables is not handled here.

        Args:
            batch (dict): batch of the dataloader

        Returns:
            torch.tensor: result
        """
        copy_parameters(self.denoise_net, self.pred_net)

        batch_x = batch['x_num_past'].float().to(self.device)
        batch_x_mark = batch['x_cat_past'].to(self.device)
        _, out, _, _ = self.pred_net(batch_x, batch_x_mark)
        # import pdb
        # pdb.set_trace()
        return torch.permute(out, (0, 2, 1, 3))
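
A closing note: both validation_step and inference start by syncing weights, so pred_net always runs with the latest denoise_net parameters. A minimal self-contained illustration (hypothetical single-layer nets, not the package's models) of what copy_parameters does:

import torch

src = torch.nn.Linear(8, 8)
dst = torch.nn.Linear(8, 8)
dst.load_state_dict(src.state_dict(), strict=True)  # what copy_parameters wraps
assert torch.equal(src.weight, dst.weight) and torch.equal(src.bias, dst.bias)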