Myosotis-Researches 0.0.12__py3-none-any.whl → 0.0.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- myosotis_researches/CcGAN/models_128/CcGAN_SAGAN.py +301 -0
 - myosotis_researches/CcGAN/models_128/ResNet_class_eval.py +141 -0
 - myosotis_researches/CcGAN/models_128/ResNet_embed.py +188 -0
 - myosotis_researches/CcGAN/models_128/ResNet_regre_eval.py +175 -0
 - myosotis_researches/CcGAN/models_128/__init__.py +8 -0
 - myosotis_researches/CcGAN/models_128/autoencoder.py +119 -0
 - myosotis_researches/CcGAN/models_128/cGAN_SAGAN.py +276 -0
 - myosotis_researches/CcGAN/models_128/cGAN_concat_SAGAN.py +245 -0
 - myosotis_researches/CcGAN/models_256/CcGAN_SAGAN.py +303 -0
 - myosotis_researches/CcGAN/models_256/ResNet_class_eval.py +142 -0
 - myosotis_researches/CcGAN/models_256/ResNet_embed.py +188 -0
 - myosotis_researches/CcGAN/models_256/ResNet_regre_eval.py +178 -0
 - myosotis_researches/CcGAN/models_256/__init__.py +8 -0
 - myosotis_researches/CcGAN/models_256/autoencoder.py +133 -0
 - myosotis_researches/CcGAN/models_256/cGAN_SAGAN.py +280 -0
 - myosotis_researches/CcGAN/models_256/cGAN_concat_SAGAN.py +249 -0
 - myosotis_researches/CcGAN/utils/make_h5.py +13 -9
 - {myosotis_researches-0.0.12.dist-info → myosotis_researches-0.0.14.dist-info}/METADATA +1 -1
 - myosotis_researches-0.0.14.dist-info/RECORD +28 -0
 - myosotis_researches-0.0.12.dist-info/RECORD +0 -12
 - {myosotis_researches-0.0.12.dist-info → myosotis_researches-0.0.14.dist-info}/WHEEL +0 -0
 - {myosotis_researches-0.0.12.dist-info → myosotis_researches-0.0.14.dist-info}/licenses/LICENSE +0 -0
 - {myosotis_researches-0.0.12.dist-info → myosotis_researches-0.0.14.dist-info}/top_level.txt +0 -0
 
| 
         @@ -0,0 +1,188 @@ 
     | 
|
| 
      
 1 
     | 
    
         
            +
            '''
         
     | 
| 
      
 2 
     | 
    
         
            +
            ResNet-based model to map an image from pixel space to a features space.
         
     | 
| 
      
 3 
     | 
    
         
            +
            Need to be pretrained on the dataset.
         
     | 
| 
      
 4 
     | 
    
         
            +
             
     | 
| 
      
 5 
     | 
    
         
            +
if isometric_map = True, there is an extra step (self.classifier_1 = nn.Linear(512, 32*32*3)) to increase the dimension of the feature map from 512 to 32*32*3. This selection is for density-ratio estimation in feature space.
         
     | 
| 
      
 6 
     | 
    
         
            +
             
     | 
| 
      
 7 
     | 
    
         
            +
            codes are based on
         
     | 
| 
      
 8 
     | 
    
         
            +
            @article{
         
     | 
| 
      
 9 
     | 
    
         
            +
            zhang2018mixup,
         
     | 
| 
      
 10 
     | 
    
         
            +
            title={mixup: Beyond Empirical Risk Minimization},
         
     | 
| 
      
 11 
     | 
    
         
            +
            author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
         
     | 
| 
      
 12 
     | 
    
         
            +
            journal={International Conference on Learning Representations},
         
     | 
| 
      
 13 
     | 
    
         
            +
            year={2018},
         
     | 
| 
      
 14 
     | 
    
         
            +
            url={https://openreview.net/forum?id=r1Ddp1-Rb},
         
     | 
| 
      
 15 
     | 
    
         
            +
            }
         
     | 
| 
      
 16 
     | 
    
         
            +
            '''
         
     | 
| 
      
 17 
     | 
    
         
            +
             
     | 
| 
      
 18 
     | 
    
         
            +
             
     | 
| 
      
 19 
     | 
    
         
            +
            import torch
         
     | 
| 
      
 20 
     | 
    
         
            +
            import torch.nn as nn
         
     | 
| 
      
 21 
     | 
    
         
            +
            import torch.nn.functional as F
         
     | 
| 
      
 22 
     | 
    
         
            +
             
     | 
| 
      
 23 
     | 
    
         
            +
            NC = 3
         
     | 
| 
      
 24 
     | 
    
         
            +
            IMG_SIZE = 256
         
     | 
| 
      
 25 
     | 
    
         
            +
            DIM_EMBED = 128
         
     | 
| 
      
 26 
     | 
    
         
            +
             
     | 
| 
      
 27 
     | 
    
         
            +
             
     | 
| 
      
 28 
     | 
    
         
            +
            #------------------------------------------------------------------------------
         
     | 
| 
      
 29 
     | 
    
         
            +
            class BasicBlock(nn.Module):
         
     | 
| 
      
 30 
     | 
    
         
            +
                expansion = 1
         
     | 
| 
      
 31 
     | 
    
         
            +
             
     | 
| 
      
 32 
     | 
    
         
            +
                def __init__(self, in_planes, planes, stride=1):
         
     | 
| 
      
 33 
     | 
    
         
            +
                    super(BasicBlock, self).__init__()
         
     | 
| 
      
 34 
     | 
    
         
            +
                    self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
         
     | 
| 
      
 35 
     | 
    
         
            +
                    self.bn1 = nn.BatchNorm2d(planes)
         
     | 
| 
      
 36 
     | 
    
         
            +
                    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
         
     | 
| 
      
 37 
     | 
    
         
            +
                    self.bn2 = nn.BatchNorm2d(planes)
         
     | 
| 
      
 38 
     | 
    
         
            +
             
     | 
| 
      
 39 
     | 
    
         
            +
                    self.shortcut = nn.Sequential()
         
     | 
| 
      
 40 
     | 
    
         
            +
                    if stride != 1 or in_planes != self.expansion*planes:
         
     | 
| 
      
 41 
     | 
    
         
            +
                        self.shortcut = nn.Sequential(
         
     | 
| 
      
 42 
     | 
    
         
            +
                            nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
         
     | 
| 
      
 43 
     | 
    
         
            +
                            nn.BatchNorm2d(self.expansion*planes)
         
     | 
| 
      
 44 
     | 
    
         
            +
                        )
         
     | 
| 
      
 45 
     | 
    
         
            +
             
     | 
| 
      
 46 
     | 
    
         
            +
                def forward(self, x):
         
     | 
| 
      
 47 
     | 
    
         
            +
                    out = F.relu(self.bn1(self.conv1(x)))
         
     | 
| 
      
 48 
     | 
    
         
            +
                    out = self.bn2(self.conv2(out))
         
     | 
| 
      
 49 
     | 
    
         
            +
                    out += self.shortcut(x)
         
     | 
| 
      
 50 
     | 
    
         
            +
                    out = F.relu(out)
         
     | 
| 
      
 51 
     | 
    
         
            +
                    return out
         
     | 
| 
      
 52 
     | 
    
         
            +
             
     | 
| 
      
 53 
     | 
    
         
            +
             
     | 
| 
      
 54 
     | 
    
         
            +
            class Bottleneck(nn.Module):
         
     | 
| 
      
 55 
     | 
    
         
            +
                expansion = 4
         
     | 
| 
      
 56 
     | 
    
         
            +
             
     | 
| 
      
 57 
     | 
    
         
            +
                def __init__(self, in_planes, planes, stride=1):
         
     | 
| 
      
 58 
     | 
    
         
            +
                    super(Bottleneck, self).__init__()
         
     | 
| 
      
 59 
     | 
    
         
            +
                    self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
         
     | 
| 
      
 60 
     | 
    
         
            +
                    self.bn1 = nn.BatchNorm2d(planes)
         
     | 
| 
      
 61 
     | 
    
         
            +
                    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
         
     | 
| 
      
 62 
     | 
    
         
            +
                    self.bn2 = nn.BatchNorm2d(planes)
         
     | 
| 
      
 63 
     | 
    
         
            +
                    self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
         
     | 
| 
      
 64 
     | 
    
         
            +
                    self.bn3 = nn.BatchNorm2d(self.expansion*planes)
         
     | 
| 
      
 65 
     | 
    
         
            +
             
     | 
| 
      
 66 
     | 
    
         
            +
                    self.shortcut = nn.Sequential()
         
     | 
| 
      
 67 
     | 
    
         
            +
                    if stride != 1 or in_planes != self.expansion*planes:
         
     | 
| 
      
 68 
     | 
    
         
            +
                        self.shortcut = nn.Sequential(
         
     | 
| 
      
 69 
     | 
    
         
            +
                            nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
         
     | 
| 
      
 70 
     | 
    
         
            +
                            nn.BatchNorm2d(self.expansion*planes)
         
     | 
| 
      
 71 
     | 
    
         
            +
                        )
         
     | 
| 
      
 72 
     | 
    
         
            +
             
     | 
| 
      
 73 
     | 
    
         
            +
                def forward(self, x):
         
     | 
| 
      
 74 
     | 
    
         
            +
                    out = F.relu(self.bn1(self.conv1(x)))
         
     | 
| 
      
 75 
     | 
    
         
            +
                    out = F.relu(self.bn2(self.conv2(out)))
         
     | 
| 
      
 76 
     | 
    
         
            +
                    out = self.bn3(self.conv3(out))
         
     | 
| 
      
 77 
     | 
    
         
            +
                    out += self.shortcut(x)
         
     | 
| 
      
 78 
     | 
    
         
            +
                    out = F.relu(out)
         
     | 
| 
      
 79 
     | 
    
         
            +
                    return out
         
     | 
| 
      
 80 
     | 
    
         
            +
             
     | 
| 
      
 81 
     | 
    
         
            +
             
     | 
| 
      
 82 
     | 
    
         
            +
            class ResNet_embed(nn.Module):
         
     | 
| 
      
 83 
     | 
    
         
            +
                def __init__(self, block, num_blocks, nc=NC, dim_embed=DIM_EMBED):
         
     | 
| 
      
 84 
     | 
    
         
            +
                    super(ResNet_embed, self).__init__()
         
     | 
| 
      
 85 
     | 
    
         
            +
                    self.in_planes = 64
         
     | 
| 
      
 86 
     | 
    
         
            +
             
     | 
| 
      
 87 
     | 
    
         
            +
                    self.main = nn.Sequential(
         
     | 
| 
      
 88 
     | 
    
         
            +
                        nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False),  # h=h 256
         
     | 
| 
      
 89 
     | 
    
         
            +
                        nn.BatchNorm2d(64),
         
     | 
| 
      
 90 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 91 
     | 
    
         
            +
                        nn.MaxPool2d(2,2), #h=h/2 128
         
     | 
| 
      
 92 
     | 
    
         
            +
                        # self._make_layer(block, 64, num_blocks[0], stride=1),  # h=h
         
     | 
| 
      
 93 
     | 
    
         
            +
                        self._make_layer(block, 64, num_blocks[0], stride=2),  # h=h/2 64
         
     | 
| 
      
 94 
     | 
    
         
            +
                        nn.MaxPool2d(2,2), # 32
         
     | 
| 
      
 95 
     | 
    
         
            +
                        self._make_layer(block, 128, num_blocks[1], stride=2), # h=h/2 16
         
     | 
| 
      
 96 
     | 
    
         
            +
                        self._make_layer(block, 256, num_blocks[2], stride=2), # h=h/2 8
         
     | 
| 
      
 97 
     | 
    
         
            +
                        self._make_layer(block, 512, num_blocks[3], stride=2), # h=h/2 4
         
     | 
| 
      
 98 
     | 
    
         
            +
                        # nn.AvgPool2d(kernel_size=4)
         
     | 
| 
      
 99 
     | 
    
         
            +
                        nn.AdaptiveAvgPool2d((1, 1))
         
     | 
| 
      
 100 
     | 
    
         
            +
                    )
         
     | 
| 
      
 101 
     | 
    
         
            +
             
     | 
| 
      
 102 
     | 
    
         
            +
                    self.x2h_res = nn.Sequential(
         
     | 
| 
      
 103 
     | 
    
         
            +
                        nn.Linear(512, 512),
         
     | 
| 
      
 104 
     | 
    
         
            +
                        nn.BatchNorm1d(512),
         
     | 
| 
      
 105 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 106 
     | 
    
         
            +
             
     | 
| 
      
 107 
     | 
    
         
            +
                        nn.Linear(512, dim_embed),
         
     | 
| 
      
 108 
     | 
    
         
            +
                        nn.BatchNorm1d(dim_embed),
         
     | 
| 
      
 109 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 110 
     | 
    
         
            +
                    )
         
     | 
| 
      
 111 
     | 
    
         
            +
             
     | 
| 
      
 112 
     | 
    
         
            +
                    self.h2y = nn.Sequential(
         
     | 
| 
      
 113 
     | 
    
         
            +
                        nn.Linear(dim_embed, 1),
         
     | 
| 
      
 114 
     | 
    
         
            +
                        nn.ReLU()
         
     | 
| 
      
 115 
     | 
    
         
            +
                    )
         
     | 
| 
      
 116 
     | 
    
         
            +
             
     | 
| 
      
 117 
     | 
    
         
            +
                def _make_layer(self, block, planes, num_blocks, stride):
         
     | 
| 
      
 118 
     | 
    
         
            +
                    strides = [stride] + [1]*(num_blocks-1)
         
     | 
| 
      
 119 
     | 
    
         
            +
                    layers = []
         
     | 
| 
      
 120 
     | 
    
         
            +
                    for stride in strides:
         
     | 
| 
      
 121 
     | 
    
         
            +
                        layers.append(block(self.in_planes, planes, stride))
         
     | 
| 
      
 122 
     | 
    
         
            +
                        self.in_planes = planes * block.expansion
         
     | 
| 
      
 123 
     | 
    
         
            +
                    return nn.Sequential(*layers)
         
     | 
| 
      
 124 
     | 
    
         
            +
             
     | 
| 
      
 125 
     | 
    
         
            +
                def forward(self, x):
         
     | 
| 
      
 126 
     | 
    
         
            +
             
     | 
| 
      
 127 
     | 
    
         
            +
                    features = self.main(x)
         
     | 
| 
      
 128 
     | 
    
         
            +
                    features = features.view(features.size(0), -1)
         
     | 
| 
      
 129 
     | 
    
         
            +
                    features = self.x2h_res(features)
         
     | 
| 
      
 130 
     | 
    
         
            +
                    out = self.h2y(features)
         
     | 
| 
      
 131 
     | 
    
         
            +
             
     | 
| 
      
 132 
     | 
    
         
            +
                    return out, features
         
     | 
| 
      
 133 
     | 
    
         
            +
             
     | 
| 
      
 134 
     | 
    
         
            +
             
     | 
| 
      
 135 
     | 
    
         
            +
            def ResNet18_embed(dim_embed=DIM_EMBED):
         
     | 
| 
      
 136 
     | 
    
         
            +
                return ResNet_embed(BasicBlock, [2,2,2,2], dim_embed=dim_embed)
         
     | 
| 
      
 137 
     | 
    
         
            +
             
     | 
| 
      
 138 
     | 
    
         
            +
            def ResNet34_embed(dim_embed=DIM_EMBED):
         
     | 
| 
      
 139 
     | 
    
         
            +
                return ResNet_embed(BasicBlock, [3,4,6,3], dim_embed=dim_embed)
         
     | 
| 
      
 140 
     | 
    
         
            +
             
     | 
| 
      
 141 
     | 
    
         
            +
            def ResNet50_embed(dim_embed=DIM_EMBED):
         
     | 
| 
      
 142 
     | 
    
         
            +
                return ResNet_embed(Bottleneck, [3,4,6,3], dim_embed=dim_embed)
         
     | 
| 
      
 143 
     | 
    
         
            +
             
     | 
| 
      
 144 
     | 
    
         
            +
            #------------------------------------------------------------------------------
         
     | 
| 
      
 145 
     | 
    
         
            +
            # map labels to the embedding space
         
     | 
| 
      
 146 
     | 
    
         
            +
            class model_y2h(nn.Module):
         
     | 
| 
      
 147 
     | 
    
         
            +
                def __init__(self, dim_embed=DIM_EMBED):
         
     | 
| 
      
 148 
     | 
    
         
            +
                    super(model_y2h, self).__init__()
         
     | 
| 
      
 149 
     | 
    
         
            +
                    self.main = nn.Sequential(
         
     | 
| 
      
 150 
     | 
    
         
            +
                        nn.Linear(1, dim_embed),
         
     | 
| 
      
 151 
     | 
    
         
            +
                        # nn.BatchNorm1d(dim_embed),
         
     | 
| 
      
 152 
     | 
    
         
            +
                        nn.GroupNorm(8, dim_embed),
         
     | 
| 
      
 153 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 154 
     | 
    
         
            +
             
     | 
| 
      
 155 
     | 
    
         
            +
                        nn.Linear(dim_embed, dim_embed),
         
     | 
| 
      
 156 
     | 
    
         
            +
                        # nn.BatchNorm1d(dim_embed),
         
     | 
| 
      
 157 
     | 
    
         
            +
                        nn.GroupNorm(8, dim_embed),
         
     | 
| 
      
 158 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 159 
     | 
    
         
            +
             
     | 
| 
      
 160 
     | 
    
         
            +
                        nn.Linear(dim_embed, dim_embed),
         
     | 
| 
      
 161 
     | 
    
         
            +
                        # nn.BatchNorm1d(dim_embed),
         
     | 
| 
      
 162 
     | 
    
         
            +
                        nn.GroupNorm(8, dim_embed),
         
     | 
| 
      
 163 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 164 
     | 
    
         
            +
             
     | 
| 
      
 165 
     | 
    
         
            +
                        nn.Linear(dim_embed, dim_embed),
         
     | 
| 
      
 166 
     | 
    
         
            +
                        # nn.BatchNorm1d(dim_embed),
         
     | 
| 
      
 167 
     | 
    
         
            +
                        nn.GroupNorm(8, dim_embed),
         
     | 
| 
      
 168 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 169 
     | 
    
         
            +
             
     | 
| 
      
 170 
     | 
    
         
            +
                        nn.Linear(dim_embed, dim_embed),
         
     | 
| 
      
 171 
     | 
    
         
            +
                        nn.ReLU()
         
     | 
| 
      
 172 
     | 
    
         
            +
                    )
         
     | 
| 
      
 173 
     | 
    
         
            +
             
     | 
| 
      
 174 
     | 
    
         
            +
                def forward(self, y):
         
     | 
| 
      
 175 
     | 
    
         
            +
                    y = y.view(-1, 1) +1e-8
         
     | 
| 
      
 176 
     | 
    
         
            +
                    # y = torch.exp(y.view(-1, 1))
         
     | 
| 
      
 177 
     | 
    
         
            +
                    return self.main(y)
         
     | 
| 
      
 178 
     | 
    
         
            +
             
     | 
| 
      
 179 
     | 
    
         
            +
             
     | 
| 
      
 180 
     | 
    
         
            +
             
     | 
| 
      
 181 
     | 
    
         
            +
if __name__ == "__main__":
    # Smoke test (requires CUDA): push a random batch through the embedding
    # network and print the shapes of the regression output and the embedding.
    net = ResNet34_embed(dim_embed=128).cuda()
    x = torch.randn(16,NC,IMG_SIZE,IMG_SIZE).cuda()
    out, features = net(x)
    print(out.size())  # regression head output: (16, 1)
    print(features.size())  # embedding: (16, 128)

    # Also instantiate the label-to-embedding mapper (not exercised here).
    net_y2h = model_y2h()
         
     | 
| 
         @@ -0,0 +1,178 @@ 
     | 
|
| 
      
 1 
     | 
    
         
            +
            '''
         
     | 
| 
      
 2 
     | 
    
         
            +
            codes are based on
         
     | 
| 
      
 3 
     | 
    
         
            +
            @article{
         
     | 
| 
      
 4 
     | 
    
         
            +
            zhang2018mixup,
         
     | 
| 
      
 5 
     | 
    
         
            +
            title={mixup: Beyond Empirical Risk Minimization},
         
     | 
| 
      
 6 
     | 
    
         
            +
            author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
         
     | 
| 
      
 7 
     | 
    
         
            +
            journal={International Conference on Learning Representations},
         
     | 
| 
      
 8 
     | 
    
         
            +
            year={2018},
         
     | 
| 
      
 9 
     | 
    
         
            +
            url={https://openreview.net/forum?id=r1Ddp1-Rb},
         
     | 
| 
      
 10 
     | 
    
         
            +
            }
         
     | 
| 
      
 11 
     | 
    
         
            +
            '''
         
     | 
| 
      
 12 
     | 
    
         
            +
             
     | 
| 
      
 13 
     | 
    
         
            +
             
     | 
| 
      
 14 
     | 
    
         
            +
            import torch
         
     | 
| 
      
 15 
     | 
    
         
            +
            import torch.nn as nn
         
     | 
| 
      
 16 
     | 
    
         
            +
            import torch.nn.functional as F
         
     | 
| 
      
 17 
     | 
    
         
            +
             
     | 
| 
      
 18 
     | 
    
         
            +
            NC = 3
         
     | 
| 
      
 19 
     | 
    
         
            +
            IMG_SIZE = 256
         
     | 
| 
      
 20 
     | 
    
         
            +
             
     | 
| 
      
 21 
     | 
    
         
            +
             
     | 
| 
      
 22 
     | 
    
         
            +
            class BasicBlock(nn.Module):
         
     | 
| 
      
 23 
     | 
    
         
            +
                expansion = 1
         
     | 
| 
      
 24 
     | 
    
         
            +
             
     | 
| 
      
 25 
     | 
    
         
            +
                def __init__(self, in_planes, planes, stride=1):
         
     | 
| 
      
 26 
     | 
    
         
            +
                    super(BasicBlock, self).__init__()
         
     | 
| 
      
 27 
     | 
    
         
            +
                    self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
         
     | 
| 
      
 28 
     | 
    
         
            +
                    self.bn1 = nn.BatchNorm2d(planes)
         
     | 
| 
      
 29 
     | 
    
         
            +
                    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
         
     | 
| 
      
 30 
     | 
    
         
            +
                    self.bn2 = nn.BatchNorm2d(planes)
         
     | 
| 
      
 31 
     | 
    
         
            +
             
     | 
| 
      
 32 
     | 
    
         
            +
                    self.shortcut = nn.Sequential()
         
     | 
| 
      
 33 
     | 
    
         
            +
                    if stride != 1 or in_planes != self.expansion*planes:
         
     | 
| 
      
 34 
     | 
    
         
            +
                        self.shortcut = nn.Sequential(
         
     | 
| 
      
 35 
     | 
    
         
            +
                            nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
         
     | 
| 
      
 36 
     | 
    
         
            +
                            nn.BatchNorm2d(self.expansion*planes)
         
     | 
| 
      
 37 
     | 
    
         
            +
                        )
         
     | 
| 
      
 38 
     | 
    
         
            +
             
     | 
| 
      
 39 
     | 
    
         
            +
                def forward(self, x):
         
     | 
| 
      
 40 
     | 
    
         
            +
                    out = F.relu(self.bn1(self.conv1(x)))
         
     | 
| 
      
 41 
     | 
    
         
            +
                    out = self.bn2(self.conv2(out))
         
     | 
| 
      
 42 
     | 
    
         
            +
                    out += self.shortcut(x)
         
     | 
| 
      
 43 
     | 
    
         
            +
                    out = F.relu(out)
         
     | 
| 
      
 44 
     | 
    
         
            +
                    return out
         
     | 
| 
      
 45 
     | 
    
         
            +
             
     | 
| 
      
 46 
     | 
    
         
            +
             
     | 
| 
      
 47 
     | 
    
         
            +
            class Bottleneck(nn.Module):
         
     | 
| 
      
 48 
     | 
    
         
            +
                expansion = 4
         
     | 
| 
      
 49 
     | 
    
         
            +
             
     | 
| 
      
 50 
     | 
    
         
            +
                def __init__(self, in_planes, planes, stride=1):
         
     | 
| 
      
 51 
     | 
    
         
            +
                    super(Bottleneck, self).__init__()
         
     | 
| 
      
 52 
     | 
    
         
            +
                    self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
         
     | 
| 
      
 53 
     | 
    
         
            +
                    self.bn1 = nn.BatchNorm2d(planes)
         
     | 
| 
      
 54 
     | 
    
         
            +
                    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
         
     | 
| 
      
 55 
     | 
    
         
            +
                    self.bn2 = nn.BatchNorm2d(planes)
         
     | 
| 
      
 56 
     | 
    
         
            +
                    self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
         
     | 
| 
      
 57 
     | 
    
         
            +
                    self.bn3 = nn.BatchNorm2d(self.expansion*planes)
         
     | 
| 
      
 58 
     | 
    
         
            +
             
     | 
| 
      
 59 
     | 
    
         
            +
                    self.shortcut = nn.Sequential()
         
     | 
| 
      
 60 
     | 
    
         
            +
                    if stride != 1 or in_planes != self.expansion*planes:
         
     | 
| 
      
 61 
     | 
    
         
            +
                        self.shortcut = nn.Sequential(
         
     | 
| 
      
 62 
     | 
    
         
            +
                            nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
         
     | 
| 
      
 63 
     | 
    
         
            +
                            nn.BatchNorm2d(self.expansion*planes)
         
     | 
| 
      
 64 
     | 
    
         
            +
                        )
         
     | 
| 
      
 65 
     | 
    
         
            +
             
     | 
| 
      
 66 
     | 
    
         
            +
                def forward(self, x):
         
     | 
| 
      
 67 
     | 
    
         
            +
                    out = F.relu(self.bn1(self.conv1(x)))
         
     | 
| 
      
 68 
     | 
    
         
            +
                    out = F.relu(self.bn2(self.conv2(out)))
         
     | 
| 
      
 69 
     | 
    
         
            +
                    out = self.bn3(self.conv3(out))
         
     | 
| 
      
 70 
     | 
    
         
            +
                    out += self.shortcut(x)
         
     | 
| 
      
 71 
     | 
    
         
            +
                    out = F.relu(out)
         
     | 
| 
      
 72 
     | 
    
         
            +
                    return out
         
     | 
| 
      
 73 
     | 
    
         
            +
             
     | 
| 
      
 74 
     | 
    
         
            +
             
     | 
| 
      
 75 
     | 
    
         
            +
class ResNet_regre_eval(nn.Module):
    """ResNet backbone with a scalar regression head, used for evaluation.

    forward() returns a pair ``(out, ext_features)``:
    ``out`` is the regression output (shape (N, 1)), and ``ext_features`` are
    flattened intermediate features taken from the stage selected by
    ``feature_layer`` ('f2' -> block2 output, 'f3' -> block3 output,
    anything else -> block4 output).
    """

    def __init__(self, block, num_blocks, nc=NC, ngpu = 1, feature_layer='f3'):
        # block: residual block class (BasicBlock or Bottleneck).
        # num_blocks: list of 4 ints, blocks per stage.
        # nc: number of input channels (NC is a module-level constant defined
        #     earlier in this file).
        # ngpu: if >1 and input is on CUDA, forward() uses data_parallel.
        # feature_layer: which stage's features to expose as ext_features.
        super(ResNet_regre_eval, self).__init__()
        self.in_planes = 64
        self.ngpu = ngpu
        self.feature_layer=feature_layer

        # NOTE: _make_layer mutates self.in_planes, so the construction order
        # of block1..block4 below is significant.
        self.block1 = nn.Sequential(
            nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False),  # h=h
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2,2), #h=h/2 128
            self._make_layer(block, 64, num_blocks[0], stride=2),  # h=h/2 64
        )
        self.block2 = nn.Sequential(
            nn.MaxPool2d(2,2), #h=h/2 32
            self._make_layer(block, 128, num_blocks[1], stride=2) # h=h/2 16
        )
        self.block3 = self._make_layer(block, 256, num_blocks[2], stride=2) # h=h/2 8
        self.block4 = self._make_layer(block, 512, num_blocks[3], stride=2) # h=h/2 4

        # pool1 feeds the regression head; pool2 produces ext_features.
        self.pool1 = nn.AvgPool2d(kernel_size=4)
        # NOTE(review): the 'f2' and 'f3' branches are identical (both (2,2));
        # kept as written — confirm whether 'f2' was meant to differ.
        if self.feature_layer == 'f2':
            self.pool2 = nn.AdaptiveAvgPool2d((2,2))
        elif self.feature_layer == 'f3':
            self.pool2 = nn.AdaptiveAvgPool2d((2,2))
        else:
            self.pool2 = nn.AdaptiveAvgPool2d((1,1))

        # Regression head; the trailing ReLU clamps predictions to be
        # non-negative (Sigmoid alternative left commented out by the authors).
        linear_layers = [
                nn.Linear(512*block.expansion, 128),
                nn.BatchNorm1d(128),
                nn.ReLU(),
                nn.Linear(128, 128),
                nn.BatchNorm1d(128),
                nn.ReLU(),
                nn.Linear(128, 1),
                # nn.Sigmoid()
                nn.ReLU(),
            ]
        self.linear = nn.Sequential(*linear_layers)


    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` residual blocks; only the first uses ``stride``."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # Track the running channel count for the next block/stage.
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (regression output, flattened intermediate features)."""
        # Multi-GPU path mirrors the single-device path via data_parallel.
        if x.is_cuda and self.ngpu > 1:
            ft1 = nn.parallel.data_parallel(self.block1, x, range(self.ngpu))
            ft2 = nn.parallel.data_parallel(self.block2, ft1, range(self.ngpu))
            ft3 = nn.parallel.data_parallel(self.block3, ft2, range(self.ngpu))
            ft4 = nn.parallel.data_parallel(self.block4, ft3, range(self.ngpu))
            out = nn.parallel.data_parallel(self.pool1, ft4, range(self.ngpu))
            out = out.view(out.size(0), -1)
            out = nn.parallel.data_parallel(self.linear, out, range(self.ngpu))
        else:
            ft1 = self.block1(x)
            ft2 = self.block2(ft1)
            ft3 = self.block3(ft2)
            ft4 = self.block4(ft3)
            out = self.pool1(ft4)
            out = out.view(out.size(0), -1)
            out = self.linear(out)

        # Select which stage's activations to expose as auxiliary features.
        if self.feature_layer == 'f2':
            ext_features = self.pool2(ft2)
        elif self.feature_layer == 'f3':
            ext_features = self.pool2(ft3)
        else:
            ext_features = self.pool2(ft4)

        ext_features = ext_features.view(ext_features.size(0), -1)

        return out, ext_features
         
     | 
| 
      
 155 
     | 
    
         
            +
             
     | 
| 
      
 156 
     | 
    
         
            +
             
     | 
| 
      
 157 
     | 
    
         
            +
            def ResNet18_regre_eval(ngpu = 1):
         
     | 
| 
      
 158 
     | 
    
         
            +
                return ResNet_regre_eval(BasicBlock, [2,2,2,2], ngpu = ngpu)
         
     | 
| 
      
 159 
     | 
    
         
            +
             
     | 
| 
      
 160 
     | 
    
         
            +
            def ResNet34_regre_eval(ngpu = 1):
         
     | 
| 
      
 161 
     | 
    
         
            +
                return ResNet_regre_eval(BasicBlock, [3,4,6,3], ngpu = ngpu)
         
     | 
| 
      
 162 
     | 
    
         
            +
             
     | 
| 
      
 163 
     | 
    
         
            +
            def ResNet50_regre_eval(ngpu = 1):
         
     | 
| 
      
 164 
     | 
    
         
            +
                return ResNet_regre_eval(Bottleneck, [3,4,6,3], ngpu = ngpu)
         
     | 
| 
      
 165 
     | 
    
         
            +
             
     | 
| 
      
 166 
     | 
    
         
            +
            def ResNet101_regre_eval(ngpu = 1):
         
     | 
| 
      
 167 
     | 
    
         
            +
                return ResNet_regre_eval(Bottleneck, [3,4,23,3], ngpu = ngpu)
         
     | 
| 
      
 168 
     | 
    
         
            +
             
     | 
| 
      
 169 
     | 
    
         
            +
            def ResNet152_regre_eval(ngpu = 1):
         
     | 
| 
      
 170 
     | 
    
         
            +
                return ResNet_regre_eval(Bottleneck, [3,8,36,3], ngpu = ngpu)
         
     | 
| 
      
 171 
     | 
    
         
            +
             
     | 
| 
      
 172 
     | 
    
         
            +
             
     | 
| 
      
 173 
     | 
    
         
            +
            if __name__ == "__main__":
         
     | 
| 
      
 174 
     | 
    
         
            +
                net = ResNet34_regre_eval(ngpu = 1).cuda()
         
     | 
| 
      
 175 
     | 
    
         
            +
                x = torch.randn(4,NC,IMG_SIZE,IMG_SIZE).cuda()
         
     | 
| 
      
 176 
     | 
    
         
            +
                out, features = net(x)
         
     | 
| 
      
 177 
     | 
    
         
            +
                print(out.size())
         
     | 
| 
      
 178 
     | 
    
         
            +
                print(features.size())
         
     | 
| 
         @@ -0,0 +1,8 @@ 
     | 
|
| 
      
 1 
     | 
    
         
            +
            from .autoencoder import *
         
     | 
| 
      
 2 
     | 
    
         
            +
            from .cGAN_SAGAN import cGAN_SAGAN_Generator, cGAN_SAGAN_Discriminator
         
     | 
| 
      
 3 
     | 
    
         
            +
            from .cGAN_concat_SAGAN import cGAN_concat_SAGAN_Generator, cGAN_concat_SAGAN_Discriminator
         
     | 
| 
      
 4 
     | 
    
         
            +
            from .CcGAN_SAGAN import CcGAN_SAGAN_Generator, CcGAN_SAGAN_Discriminator
         
     | 
| 
      
 5 
     | 
    
         
            +
            from .ResNet_embed import ResNet18_embed, ResNet34_embed, ResNet50_embed, model_y2h
         
     | 
| 
      
 6 
     | 
    
         
            +
            from .ResNet_regre_eval import ResNet34_regre_eval
         
     | 
| 
      
 7 
     | 
    
         
            +
            from .ResNet_class_eval import ResNet34_class_eval
         
     | 
| 
      
 8 
     | 
    
         
            +
             
     | 
| 
         @@ -0,0 +1,133 @@ 
     | 
|
| 
      
 1 
     | 
    
         
            +
            import torch
         
     | 
| 
      
 2 
     | 
    
         
            +
            from torch import nn
         
     | 
| 
      
 3 
     | 
    
         
            +
             
     | 
| 
      
 4 
     | 
    
         
            +
             
     | 
| 
      
 5 
     | 
    
         
            +
             
     | 
| 
      
 6 
     | 
    
         
            +
            class encoder(nn.Module):
         
     | 
| 
      
 7 
     | 
    
         
            +
                def __init__(self, dim_bottleneck=512, ch=64):
         
     | 
| 
      
 8 
     | 
    
         
            +
                    super(encoder, self).__init__()
         
     | 
| 
      
 9 
     | 
    
         
            +
                    self.ch = ch
         
     | 
| 
      
 10 
     | 
    
         
            +
                    self.dim_bottleneck = dim_bottleneck
         
     | 
| 
      
 11 
     | 
    
         
            +
             
     | 
| 
      
 12 
     | 
    
         
            +
                    self.conv = nn.Sequential(
         
     | 
| 
      
 13 
     | 
    
         
            +
                        nn.Conv2d(3, ch, kernel_size=4, stride=2, padding=1), #h=h/2; 128
         
     | 
| 
      
 14 
     | 
    
         
            +
                        nn.BatchNorm2d(ch),
         
     | 
| 
      
 15 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 16 
     | 
    
         
            +
                        nn.MaxPool2d(2,2), #h=h/2; 64
         
     | 
| 
      
 17 
     | 
    
         
            +
                        nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 18 
     | 
    
         
            +
                        nn.BatchNorm2d(ch),
         
     | 
| 
      
 19 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 20 
     | 
    
         
            +
             
     | 
| 
      
 21 
     | 
    
         
            +
                        nn.Conv2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=h/2; 32
         
     | 
| 
      
 22 
     | 
    
         
            +
                        nn.BatchNorm2d(ch),
         
     | 
| 
      
 23 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 24 
     | 
    
         
            +
                        nn.Conv2d(ch, ch*2, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 25 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*2),
         
     | 
| 
      
 26 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 27 
     | 
    
         
            +
             
     | 
| 
      
 28 
     | 
    
         
            +
                        nn.Conv2d(ch*2, ch*2, kernel_size=4, stride=2, padding=1), #h=h/2; 16
         
     | 
| 
      
 29 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*2),
         
     | 
| 
      
 30 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 31 
     | 
    
         
            +
                        nn.Conv2d(ch*2, ch*4, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 32 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*4),
         
     | 
| 
      
 33 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 34 
     | 
    
         
            +
             
     | 
| 
      
 35 
     | 
    
         
            +
                        nn.Conv2d(ch*4, ch*4, kernel_size=4, stride=2, padding=1), #h=h/2; 8
         
     | 
| 
      
 36 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*4),
         
     | 
| 
      
 37 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 38 
     | 
    
         
            +
                        nn.Conv2d(ch*4, ch*8, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 39 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*8),
         
     | 
| 
      
 40 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 41 
     | 
    
         
            +
             
     | 
| 
      
 42 
     | 
    
         
            +
                        nn.Conv2d(ch*8, ch*8, kernel_size=4, stride=2, padding=1), #h=h/2; 4
         
     | 
| 
      
 43 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*8),
         
     | 
| 
      
 44 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 45 
     | 
    
         
            +
                        nn.Conv2d(ch*8, ch*16, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 46 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*16),
         
     | 
| 
      
 47 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 48 
     | 
    
         
            +
                    )
         
     | 
| 
      
 49 
     | 
    
         
            +
             
     | 
| 
      
 50 
     | 
    
         
            +
                    self.linear = nn.Sequential(
         
     | 
| 
      
 51 
     | 
    
         
            +
                        nn.Linear(ch*16*4*4, dim_bottleneck),
         
     | 
| 
      
 52 
     | 
    
         
            +
                        # nn.ReLU()
         
     | 
| 
      
 53 
     | 
    
         
            +
                    )
         
     | 
| 
      
 54 
     | 
    
         
            +
             
     | 
| 
      
 55 
     | 
    
         
            +
                def forward(self, x):
         
     | 
| 
      
 56 
     | 
    
         
            +
                    feature = self.conv(x)
         
     | 
| 
      
 57 
     | 
    
         
            +
                    feature = feature.view(-1, self.ch*16*4*4)
         
     | 
| 
      
 58 
     | 
    
         
            +
                    feature = self.linear(feature)
         
     | 
| 
      
 59 
     | 
    
         
            +
                    return feature
         
     | 
| 
      
 60 
     | 
    
         
            +
             
     | 
| 
      
 61 
     | 
    
         
            +
             
     | 
| 
      
 62 
     | 
    
         
            +
             
     | 
| 
      
 63 
     | 
    
         
            +
            class decoder(nn.Module):
         
     | 
| 
      
 64 
     | 
    
         
            +
                def __init__(self, dim_bottleneck=512, ch=64):
         
     | 
| 
      
 65 
     | 
    
         
            +
                    super(decoder, self).__init__()
         
     | 
| 
      
 66 
     | 
    
         
            +
                    self.ch = ch
         
     | 
| 
      
 67 
     | 
    
         
            +
                    self.dim_bottleneck = dim_bottleneck
         
     | 
| 
      
 68 
     | 
    
         
            +
             
     | 
| 
      
 69 
     | 
    
         
            +
                    self.linear = nn.Sequential(
         
     | 
| 
      
 70 
     | 
    
         
            +
                        nn.Linear(dim_bottleneck, ch*16*4*4)
         
     | 
| 
      
 71 
     | 
    
         
            +
                    )
         
     | 
| 
      
 72 
     | 
    
         
            +
             
     | 
| 
      
 73 
     | 
    
         
            +
                    self.deconv = nn.Sequential(
         
     | 
| 
      
 74 
     | 
    
         
            +
                        nn.ConvTranspose2d(ch*16, ch*16, kernel_size=4, stride=2, padding=1), #h=2h; 8
         
     | 
| 
      
 75 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*16),
         
     | 
| 
      
 76 
     | 
    
         
            +
                        nn.ReLU(True),
         
     | 
| 
      
 77 
     | 
    
         
            +
                        nn.Conv2d(ch*16, ch*8, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 78 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*8),
         
     | 
| 
      
 79 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 80 
     | 
    
         
            +
             
     | 
| 
      
 81 
     | 
    
         
            +
                        nn.ConvTranspose2d(ch*8, ch*8, kernel_size=4, stride=2, padding=1), #h=2h; 16
         
     | 
| 
      
 82 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*8),
         
     | 
| 
      
 83 
     | 
    
         
            +
                        nn.ReLU(True),
         
     | 
| 
      
 84 
     | 
    
         
            +
                        nn.Conv2d(ch*8, ch*4, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 85 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*4),
         
     | 
| 
      
 86 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 87 
     | 
    
         
            +
             
     | 
| 
      
 88 
     | 
    
         
            +
                        nn.ConvTranspose2d(ch*4, ch*4, kernel_size=4, stride=2, padding=1), #h=2h; 32
         
     | 
| 
      
 89 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*4),
         
     | 
| 
      
 90 
     | 
    
         
            +
                        nn.ReLU(True),
         
     | 
| 
      
 91 
     | 
    
         
            +
                        nn.Conv2d(ch*4, ch*2, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 92 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*2),
         
     | 
| 
      
 93 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 94 
     | 
    
         
            +
             
     | 
| 
      
 95 
     | 
    
         
            +
                        nn.ConvTranspose2d(ch*2, ch*2, kernel_size=4, stride=2, padding=1), #h=2h; 64
         
     | 
| 
      
 96 
     | 
    
         
            +
                        nn.BatchNorm2d(ch*2),
         
     | 
| 
      
 97 
     | 
    
         
            +
                        nn.ReLU(True),
         
     | 
| 
      
 98 
     | 
    
         
            +
                        nn.Conv2d(ch*2, ch, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 99 
     | 
    
         
            +
                        nn.BatchNorm2d(ch),
         
     | 
| 
      
 100 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 101 
     | 
    
         
            +
             
     | 
| 
      
 102 
     | 
    
         
            +
                        nn.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=2h; 128
         
     | 
| 
      
 103 
     | 
    
         
            +
                        nn.BatchNorm2d(ch),
         
     | 
| 
      
 104 
     | 
    
         
            +
                        nn.ReLU(True),
         
     | 
| 
      
 105 
     | 
    
         
            +
                        nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 106 
     | 
    
         
            +
                        nn.BatchNorm2d(ch),
         
     | 
| 
      
 107 
     | 
    
         
            +
                        nn.ReLU(),
         
     | 
| 
      
 108 
     | 
    
         
            +
             
     | 
| 
      
 109 
     | 
    
         
            +
                        nn.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1), #h=2h; 256
         
     | 
| 
      
 110 
     | 
    
         
            +
                        nn.BatchNorm2d(ch),
         
     | 
| 
      
 111 
     | 
    
         
            +
                        nn.ReLU(True),
         
     | 
| 
      
 112 
     | 
    
         
            +
                        nn.Conv2d(ch, 3, kernel_size=3, stride=1, padding=1), #h=h
         
     | 
| 
      
 113 
     | 
    
         
            +
                        nn.Tanh()
         
     | 
| 
      
 114 
     | 
    
         
            +
                    )
         
     | 
| 
      
 115 
     | 
    
         
            +
             
     | 
| 
      
 116 
     | 
    
         
            +
                def forward(self, feature):
         
     | 
| 
      
 117 
     | 
    
         
            +
                    out = self.linear(feature)
         
     | 
| 
      
 118 
     | 
    
         
            +
                    out = out.view(-1, self.ch*16, 4, 4)
         
     | 
| 
      
 119 
     | 
    
         
            +
                    out = self.deconv(out)
         
     | 
| 
      
 120 
     | 
    
         
            +
                    return out
         
     | 
| 
      
 121 
     | 
    
         
            +
             
     | 
| 
      
 122 
     | 
    
         
            +
             
     | 
| 
      
 123 
     | 
    
         
            +
            if __name__=="__main__":
         
     | 
| 
      
 124 
     | 
    
         
            +
                #test
         
     | 
| 
      
 125 
     | 
    
         
            +
             
     | 
| 
      
 126 
     | 
    
         
            +
                net_encoder = encoder(dim_bottleneck=512, ch=64).cuda()
         
     | 
| 
      
 127 
     | 
    
         
            +
                net_decoder = decoder(dim_bottleneck=512, ch=64).cuda()
         
     | 
| 
      
 128 
     | 
    
         
            +
             
     | 
| 
      
 129 
     | 
    
         
            +
                x = torch.randn(10, 3, 256, 256).cuda()
         
     | 
| 
      
 130 
     | 
    
         
            +
                f = net_encoder(x)
         
     | 
| 
      
 131 
     | 
    
         
            +
                xh = net_decoder(f)
         
     | 
| 
      
 132 
     | 
    
         
            +
                print(f.size())
         
     | 
| 
      
 133 
     | 
    
         
            +
                print(xh.size())
         
     |