pcntoolkit 0.32.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,219 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ Created on Fri Aug 30 09:45:35 2019
5
+
6
+ @author: seykia
7
+ """
8
+
9
+ import torch
10
+ from torch import nn
11
+ from torch.nn import functional as F
12
+ import numpy as np
13
+
14
+
def compute_conv_out_size(d_in, h_in, w_in, padding, dilation, kernel_size, stride, UPorDW):
    """Compute the output spatial size of a 3-D (de)convolution.

    Implements the standard PyTorch size formulas for ``nn.Conv3d``
    (``UPorDW='down'``) and ``nn.ConvTranspose3d`` (``UPorDW='up'``).

    Parameters
    ----------
    d_in, h_in, w_in : int or float
        Input depth, height and width.
    padding : sequence of 3 ints
        Per-dimension padding (depth, height, width).
    dilation : int
        Dilation factor (shared across all three dimensions).
    kernel_size : int
        Kernel size (shared across all three dimensions).
    stride : int
        Stride (shared across all three dimensions).
    UPorDW : {'down', 'up'}
        'down' for a strided convolution, 'up' for a transposed convolution.

    Returns
    -------
    tuple
        ``(d_out, h_out, w_out)``; floats (from ``np.floor``) for 'down',
        plain ints for 'up'.

    Raises
    ------
    ValueError
        If ``UPorDW`` is neither 'down' nor 'up' (previously this fell
        through to an opaque ``UnboundLocalError``).
    """
    if UPorDW == 'down':
        # Conv3d: out = floor((in + 2*pad - dilation*(k-1) - 1)/stride + 1)
        d_out = np.floor(
            (d_in + 2 * padding[0] - dilation * (kernel_size - 1) - 1) / stride + 1)
        h_out = np.floor(
            (h_in + 2 * padding[1] - dilation * (kernel_size - 1) - 1) / stride + 1)
        w_out = np.floor(
            (w_in + 2 * padding[2] - dilation * (kernel_size - 1) - 1) / stride + 1)
    elif UPorDW == 'up':
        # ConvTranspose3d (output_padding = 0):
        # out = (in - 1)*stride - 2*pad + dilation*(k-1) + 1
        d_out = (d_in - 1) * stride - 2 * \
            padding[0] + dilation * (kernel_size - 1) + 1
        h_out = (h_in - 1) * stride - 2 * \
            padding[1] + dilation * (kernel_size - 1) + 1
        w_out = (w_in - 1) * stride - 2 * \
            padding[2] + dilation * (kernel_size - 1) + 1
    else:
        raise ValueError(
            "UPorDW must be 'down' or 'up', got %r" % (UPorDW,))
    return d_out, h_out, w_out
31
+
32
+ ################################ ARCHITECTURES ################################
33
+
34
+
class Encoder(nn.Module):
    """Convolutional encoder mapping a context pair (x, y) to a latent
    representation of size ``r_dim``.

    ``y`` (a 5-D tensor: batch x channels x depth x height x width) is passed
    through four strided 3-D convolutions, flattened, and projected to
    ``r_conv_dim`` features; these are concatenated with the channel-averaged
    ``x`` covariates and reduced to ``r_dim`` by two fully connected layers.

    NOTE(review): module attribute names (``encoder_y_layer_*``) form the
    state-dict keys for checkpoint load/save — do not rename.
    """

    def __init__(self, x, y, args):
        """Build the encoder.

        Parameters
        ----------
        x : tensor
            Covariates; only ``x.shape[2]`` (per-sample covariate count) is
            read here.
        y : tensor
            5-D target volume; ``y.shape[2:5]`` give the spatial size used to
            pre-compute the flattened conv-feature size.
        args : object
            Must expose ``args.m`` — presumably the number of input channels
            / model factor; TODO confirm against the caller.
        """
        super(Encoder, self).__init__()
        self.r_dim = 25           # size of the final representation
        self.r_conv_dim = 100     # size of the flattened-conv projection (FC 5)
        self.lrlu_neg_slope = 0.01  # negative slope for all leaky-ReLU activations
        self.dp_level = 0.1       # dropout probability for the FC layers

        self.factor = args.m      # channel count for the grouped convs
        self.x_dim = x.shape[2]   # number of covariates per sample

        # Conv 1: grouped (depthwise) strided conv, then batch norm.
        self.encoder_y_layer_1_conv = nn.Conv3d(in_channels=self.factor, out_channels=self.factor,
                                                kernel_size=5, stride=2, padding=0,
                                                # in:(90,108,90) out:(43,52,43)
                                                dilation=1, groups=self.factor, bias=True)
        self.encoder_y_layer_1_bn = nn.BatchNorm3d(self.factor)
        # Track the spatial size analytically so FC 5 can be sized below.
        d_out_1, h_out_1, w_out_1 = compute_conv_out_size(y.shape[2], y.shape[3],
                                                          y.shape[4], padding=[0, 0, 0],
                                                          dilation=1, kernel_size=5,
                                                          stride=2, UPorDW='down')

        # Conv 2
        self.encoder_y_layer_2_conv = nn.Conv3d(in_channels=self.factor, out_channels=self.factor,
                                                kernel_size=3, stride=2, padding=0,
                                                # out: (21,25,21)
                                                dilation=1, groups=self.factor, bias=True)
        self.encoder_y_layer_2_bn = nn.BatchNorm3d(self.factor)
        d_out_2, h_out_2, w_out_2 = compute_conv_out_size(d_out_1, h_out_1,
                                                          w_out_1, padding=[0, 0, 0],
                                                          dilation=1, kernel_size=3,
                                                          stride=2, UPorDW='down')

        # Conv 3
        self.encoder_y_layer_3_conv = nn.Conv3d(in_channels=self.factor, out_channels=self.factor,
                                                kernel_size=3, stride=2, padding=0,
                                                # out: (10,12,10)
                                                dilation=1, groups=self.factor, bias=True)
        self.encoder_y_layer_3_bn = nn.BatchNorm3d(self.factor)
        d_out_3, h_out_3, w_out_3 = compute_conv_out_size(d_out_2, h_out_2,
                                                          w_out_2, padding=[0, 0, 0],
                                                          dilation=1, kernel_size=3,
                                                          stride=2, UPorDW='down')

        # Conv 4: collapses the channel dimension to a single channel.
        self.encoder_y_layer_4_conv = nn.Conv3d(in_channels=self.factor, out_channels=1,
                                                kernel_size=3, stride=2, padding=0,
                                                # out: (4,5,4)
                                                dilation=1, groups=1, bias=True)
        self.encoder_y_layer_4_bn = nn.BatchNorm3d(1)
        d_out_4, h_out_4, w_out_4 = compute_conv_out_size(d_out_3, h_out_3,
                                                          w_out_3, padding=[0, 0, 0],
                                                          dilation=1, kernel_size=3,
                                                          stride=2, UPorDW='down')
        # [channels, depth, height, width] of the final conv output; the
        # Decoder reads this (via args.cnn_feature_num) to invert the shape.
        self.cnn_feature_num = [1, int(d_out_4), int(h_out_4), int(w_out_4)]

        # FC 5: flatten conv features -> r_conv_dim
        self.encoder_y_layer_5_dp = nn.Dropout(p=self.dp_level)
        self.encoder_y_layer_5_linear = nn.Linear(
            int(np.prod(self.cnn_feature_num)), self.r_conv_dim)

        # FC 6: [conv features ++ x covariates] -> 50
        self.encoder_xy_layer_6_dp = nn.Dropout(p=self.dp_level)
        self.encoder_xy_layer_6_linear = nn.Linear(
            self.r_conv_dim + self.x_dim, 50)

        # FC 7: 50 -> r_dim
        self.encoder_xy_layer_7_dp = nn.Dropout(p=self.dp_level)
        self.encoder_xy_layer_7_linear = nn.Linear(50, self.r_dim)

    def forward(self, x, y):
        """Encode (x, y) into a representation of shape (batch, r_dim).

        Parameters
        ----------
        x : tensor
            Covariates; averaged over dim 1 before concatenation — presumably
            (batch, m, x_dim), TODO confirm.
        y : tensor
            5-D volume matching the spatial size given at construction.
        """
        # Four conv -> batch-norm -> leaky-ReLU stages.
        y = F.leaky_relu(self.encoder_y_layer_1_bn(
            self.encoder_y_layer_1_conv(y)), self.lrlu_neg_slope)
        y = F.leaky_relu(self.encoder_y_layer_2_bn(
            self.encoder_y_layer_2_conv(y)), self.lrlu_neg_slope)
        y = F.leaky_relu(self.encoder_y_layer_3_bn(
            self.encoder_y_layer_3_conv(y)), self.lrlu_neg_slope)
        y = F.leaky_relu(self.encoder_y_layer_4_bn(
            self.encoder_y_layer_4_conv(y)), self.lrlu_neg_slope)
        # Flatten the conv output and project to r_conv_dim.
        y = F.leaky_relu(self.encoder_y_layer_5_linear(self.encoder_y_layer_5_dp(
            y.view(y.shape[0], np.prod(self.cnn_feature_num)))), self.lrlu_neg_slope)
        # Concatenate with the channel-averaged covariates.
        x_y = torch.cat((y, torch.mean(x, dim=1)), 1)
        x_y = F.leaky_relu(self.encoder_xy_layer_6_linear(
            self.encoder_xy_layer_6_dp(x_y)), self.lrlu_neg_slope)
        x_y = F.leaky_relu(self.encoder_xy_layer_7_linear(
            self.encoder_xy_layer_7_dp(x_y)), self.lrlu_neg_slope)
        return x_y
126
+
127
+
class Decoder(nn.Module):
    """Convolutional decoder mapping a latent sample z and target covariates x
    back to a volume the size of ``y``.

    A small MLP expands [z ++ mean(x)] into the flattened conv-feature shape
    recorded by the Encoder; four upsample-then-transposed-conv stages then
    grow it spatially, and a final interpolation rescales exactly to
    ``y``'s spatial size before a sigmoid.

    NOTE(review): module attribute names (``decoder_zx_layer_*``) form the
    state-dict keys for checkpoint load/save — do not rename.
    """

    def __init__(self, x, y, args):
        """Build the decoder.

        Parameters
        ----------
        x : tensor
            Covariates; only ``x.shape[2]`` is read.
        y : tensor
            5-D target volume; ``y.shape[2:5]`` set the final output size.
        args : object
            Must expose ``args.m`` and ``args.cnn_feature_num`` — the latter
            is presumably copied from ``Encoder.cnn_feature_num`` by the
            caller; TODO confirm.
        """
        super(Decoder, self).__init__()
        self.r_dim = 25
        self.r_conv_dim = 100
        self.lrlu_neg_slope = 0.01  # negative slope for all leaky-ReLU activations
        self.dp_level = 0.1         # dropout probability for the FC layers
        self.z_dim = 10             # latent-sample dimensionality
        self.x_dim = x.shape[2]     # number of covariates per sample
        # [channels, depth, height, width] of the Encoder's final conv output.
        self.cnn_feature_num = args.cnn_feature_num
        self.factor = args.m

        # FC 1: [z ++ mean(x)] -> 50
        self.decoder_zx_layer_1_dp = nn.Dropout(p=self.dp_level)
        self.decoder_zx_layer_1_linear = nn.Linear(self.z_dim + self.x_dim, 50)

        # FC 2: 50 -> flattened conv-feature size
        self.decoder_zx_layer_2_dp = nn.Dropout(p=self.dp_level)
        self.decoder_zx_layer_2_linear = nn.Linear(
            50, int(np.prod(self.cnn_feature_num)))

        # Iconv 1: transposed conv after a 2x interpolation in forward();
        # the *2 factors below mirror that upsampling in the size bookkeeping.
        self.decoder_zx_layer_1_iconv = nn.ConvTranspose3d(in_channels=1, out_channels=self.factor,
                                                           kernel_size=3, stride=1,
                                                           padding=0, output_padding=(0, 0, 0),
                                                           groups=1, bias=True, dilation=1)
        self.decoder_zx_layer_1_bn = nn.BatchNorm3d(self.factor)
        d_out_4, h_out_4, w_out_4 = compute_conv_out_size(args.cnn_feature_num[1]*2,
                                                          args.cnn_feature_num[2]*2,
                                                          args.cnn_feature_num[3]*2,
                                                          padding=[0, 0, 0],
                                                          dilation=1, kernel_size=3,
                                                          stride=1, UPorDW='up')

        # Iconv 2
        self.decoder_zx_layer_2_iconv = nn.ConvTranspose3d(in_channels=self.factor, out_channels=self.factor,
                                                           kernel_size=3, stride=1, padding=0,
                                                           output_padding=(0, 0, 0), groups=self.factor,
                                                           bias=True, dilation=1)
        self.decoder_zx_layer_2_bn = nn.BatchNorm3d(self.factor)
        d_out_3, h_out_3, w_out_3 = compute_conv_out_size(d_out_4*2,
                                                          h_out_4*2,
                                                          w_out_4*2,
                                                          padding=[0, 0, 0],
                                                          dilation=1, kernel_size=3,
                                                          stride=1, UPorDW='up')
        # Iconv 3
        self.decoder_zx_layer_3_iconv = nn.ConvTranspose3d(in_channels=self.factor, out_channels=self.factor,
                                                           kernel_size=3, stride=1, padding=0,
                                                           output_padding=(0, 0, 0), groups=self.factor,
                                                           bias=True, dilation=1)
        self.decoder_zx_layer_3_bn = nn.BatchNorm3d(self.factor)
        d_out_2, h_out_2, w_out_2 = compute_conv_out_size(d_out_3*2,
                                                          h_out_3*2,
                                                          w_out_3*2,
                                                          padding=[0, 0, 0],
                                                          dilation=1, kernel_size=3,
                                                          stride=1, UPorDW='up')

        # Iconv 4: back to a single output channel (no batch norm; a sigmoid
        # is applied in forward()).
        self.decoder_zx_layer_4_iconv = nn.ConvTranspose3d(in_channels=self.factor, out_channels=1,
                                                           kernel_size=3, stride=1, padding=(0, 0, 0),
                                                           output_padding=(0, 0, 0), groups=1,
                                                           bias=True, dilation=1)
        d_out_1, h_out_1, w_out_1 = compute_conv_out_size(d_out_2*2,
                                                          h_out_2*2,
                                                          w_out_2*2,
                                                          padding=[0, 0, 0],
                                                          dilation=1, kernel_size=3,
                                                          stride=1, UPorDW='up')

        # Per-dimension scale factors that map the deconv-stack output size
        # exactly onto y's spatial size via the final F.interpolate.
        self.scaling = [y.shape[2]/d_out_1, y.shape[3]/h_out_1,
                        y.shape[4]/w_out_1]

    def forward(self, z_sample, x_target):
        """Decode a latent sample into a volume matching y's spatial size.

        Parameters
        ----------
        z_sample : tensor
            Latent sample of shape (batch, z_dim).
        x_target : tensor
            Target covariates; averaged over dim 1 — presumably
            (batch, m, x_dim), TODO confirm.

        Returns
        -------
        tensor
            Sigmoid-activated volume (values in (0, 1)).
        """
        # Expand [z ++ mean(x)] through the two FC layers.
        z_x = torch.cat([z_sample, torch.mean(x_target, dim=1)], dim=1)
        z_x = F.leaky_relu(self.decoder_zx_layer_1_linear(self.decoder_zx_layer_1_dp(z_x)),
                           self.lrlu_neg_slope)
        z_x = F.leaky_relu(self.decoder_zx_layer_2_linear(self.decoder_zx_layer_2_dp(z_x)),
                           self.lrlu_neg_slope)
        # Reshape to the Encoder's final conv-feature shape.
        z_x = z_x.view(x_target.shape[0], self.cnn_feature_num[0], self.cnn_feature_num[1],
                       self.cnn_feature_num[2], self.cnn_feature_num[3])
        # Three upsample(2x) -> deconv -> batch-norm -> leaky-ReLU stages.
        z_x = F.leaky_relu(self.decoder_zx_layer_1_bn(self.decoder_zx_layer_1_iconv(
            F.interpolate(z_x, scale_factor=2))), self.lrlu_neg_slope)
        z_x = F.leaky_relu(self.decoder_zx_layer_2_bn(self.decoder_zx_layer_2_iconv(
            F.interpolate(z_x, scale_factor=2))), self.lrlu_neg_slope)
        z_x = F.leaky_relu(self.decoder_zx_layer_3_bn(self.decoder_zx_layer_3_iconv(
            F.interpolate(z_x, scale_factor=2))), self.lrlu_neg_slope)
        # Final upsample + deconv, then rescale exactly to y's size.
        z_x = self.decoder_zx_layer_4_iconv(F.interpolate(z_x, scale_factor=2))
        y_hat = torch.sigmoid(F.interpolate(z_x, scale_factor=(self.scaling[0],
                                                               self.scaling[1], self.scaling[2])))
        return y_hat