spikezoo 0.1.2__py3-none-any.whl → 0.2__py3-none-any.whl

Files changed (192)
  1. spikezoo/__init__.py +13 -0
  2. spikezoo/archs/__pycache__/__init__.cpython-39.pyc +0 -0
  3. spikezoo/archs/base/__pycache__/nets.cpython-39.pyc +0 -0
  4. spikezoo/archs/base/nets.py +34 -0
  5. spikezoo/archs/bsf/README.md +92 -0
  6. spikezoo/archs/bsf/datasets/datasets.py +328 -0
  7. spikezoo/archs/bsf/datasets/ds_utils.py +64 -0
  8. spikezoo/archs/bsf/main.py +398 -0
  9. spikezoo/archs/bsf/metrics/psnr.py +22 -0
  10. spikezoo/archs/bsf/metrics/ssim.py +54 -0
  11. spikezoo/archs/bsf/models/bsf/__pycache__/align.cpython-39.pyc +0 -0
  12. spikezoo/archs/bsf/models/bsf/__pycache__/bsf.cpython-39.pyc +0 -0
  13. spikezoo/archs/bsf/models/bsf/__pycache__/rep.cpython-39.pyc +0 -0
  14. spikezoo/archs/bsf/models/bsf/align.py +154 -0
  15. spikezoo/archs/bsf/models/bsf/bsf.py +105 -0
  16. spikezoo/archs/bsf/models/bsf/dsft_convert.py +96 -0
  17. spikezoo/archs/bsf/models/bsf/rep.py +44 -0
  18. spikezoo/archs/bsf/models/get_model.py +7 -0
  19. spikezoo/archs/bsf/prepare_data/DSFT.py +62 -0
  20. spikezoo/archs/bsf/prepare_data/crop_dataset_train.py +135 -0
  21. spikezoo/archs/bsf/prepare_data/crop_dataset_val.py +139 -0
  22. spikezoo/archs/bsf/prepare_data/crop_train.sh +4 -0
  23. spikezoo/archs/bsf/prepare_data/crop_val.sh +4 -0
  24. spikezoo/archs/bsf/prepare_data/io_utils.py +64 -0
  25. spikezoo/archs/bsf/requirements.txt +9 -0
  26. spikezoo/archs/bsf/test.py +16 -0
  27. spikezoo/archs/bsf/utils.py +154 -0
  28. spikezoo/archs/spikeclip/__pycache__/nets.cpython-39.pyc +0 -0
  29. spikezoo/archs/spikeclip/nets.py +40 -0
  30. spikezoo/archs/spikeformer/CheckPoints/readme +1 -0
  31. spikezoo/archs/spikeformer/DataProcess/DataExtactor.py +60 -0
  32. spikezoo/archs/spikeformer/DataProcess/DataLoader.py +115 -0
  33. spikezoo/archs/spikeformer/DataProcess/LoadSpike.py +39 -0
  34. spikezoo/archs/spikeformer/EvalResults/readme +1 -0
  35. spikezoo/archs/spikeformer/LICENSE +21 -0
  36. spikezoo/archs/spikeformer/Metrics/Metrics.py +50 -0
  37. spikezoo/archs/spikeformer/Metrics/__init__.py +0 -0
  38. spikezoo/archs/spikeformer/Model/Loss.py +89 -0
  39. spikezoo/archs/spikeformer/Model/SpikeFormer.py +230 -0
  40. spikezoo/archs/spikeformer/Model/__init__.py +0 -0
  41. spikezoo/archs/spikeformer/Model/__pycache__/SpikeFormer.cpython-39.pyc +0 -0
  42. spikezoo/archs/spikeformer/Model/__pycache__/__init__.cpython-39.pyc +0 -0
  43. spikezoo/archs/spikeformer/README.md +30 -0
  44. spikezoo/archs/spikeformer/evaluate.py +87 -0
  45. spikezoo/archs/spikeformer/recon_real_data.py +97 -0
  46. spikezoo/archs/spikeformer/requirements.yml +95 -0
  47. spikezoo/archs/spikeformer/train.py +173 -0
  48. spikezoo/archs/spikeformer/utils.py +22 -0
  49. spikezoo/archs/spk2imgnet/.github/workflows/pylint.yml +23 -0
  50. spikezoo/archs/spk2imgnet/.gitignore +150 -0
  51. spikezoo/archs/spk2imgnet/DCNv2.py +135 -0
  52. spikezoo/archs/spk2imgnet/__pycache__/DCNv2.cpython-39.pyc +0 -0
  53. spikezoo/archs/spk2imgnet/__pycache__/align_arch.cpython-39.pyc +0 -0
  54. spikezoo/archs/spk2imgnet/__pycache__/nets.cpython-39.pyc +0 -0
  55. spikezoo/archs/spk2imgnet/align_arch.py +159 -0
  56. spikezoo/archs/spk2imgnet/dataset.py +144 -0
  57. spikezoo/archs/spk2imgnet/nets.py +230 -0
  58. spikezoo/archs/spk2imgnet/readme.md +86 -0
  59. spikezoo/archs/spk2imgnet/test_gen_imgseq.py +118 -0
  60. spikezoo/archs/spk2imgnet/train.py +189 -0
  61. spikezoo/archs/spk2imgnet/utils.py +64 -0
  62. spikezoo/archs/ssir/README.md +87 -0
  63. spikezoo/archs/ssir/configs/SSIR.yml +37 -0
  64. spikezoo/archs/ssir/configs/yml_parser.py +78 -0
  65. spikezoo/archs/ssir/datasets/dataset_sreds.py +170 -0
  66. spikezoo/archs/ssir/datasets/ds_utils.py +66 -0
  67. spikezoo/archs/ssir/losses.py +21 -0
  68. spikezoo/archs/ssir/main.py +326 -0
  69. spikezoo/archs/ssir/metrics/psnr.py +22 -0
  70. spikezoo/archs/ssir/metrics/ssim.py +54 -0
  71. spikezoo/archs/ssir/models/Vgg19.py +42 -0
  72. spikezoo/archs/ssir/models/__pycache__/layers.cpython-39.pyc +0 -0
  73. spikezoo/archs/ssir/models/__pycache__/networks.cpython-39.pyc +0 -0
  74. spikezoo/archs/ssir/models/layers.py +110 -0
  75. spikezoo/archs/ssir/models/networks.py +61 -0
  76. spikezoo/archs/ssir/requirements.txt +8 -0
  77. spikezoo/archs/ssir/shells/eval_SREDS.sh +6 -0
  78. spikezoo/archs/ssir/shells/train_SSIR.sh +12 -0
  79. spikezoo/archs/ssir/test.py +3 -0
  80. spikezoo/archs/ssir/utils.py +154 -0
  81. spikezoo/archs/ssml/__pycache__/cbam.cpython-39.pyc +0 -0
  82. spikezoo/archs/ssml/__pycache__/model.cpython-39.pyc +0 -0
  83. spikezoo/archs/ssml/cbam.py +224 -0
  84. spikezoo/archs/ssml/model.py +290 -0
  85. spikezoo/archs/ssml/res.png +0 -0
  86. spikezoo/archs/ssml/test.py +67 -0
  87. spikezoo/archs/stir/.git-credentials +0 -0
  88. spikezoo/archs/stir/README.md +65 -0
  89. spikezoo/archs/stir/ckpt_outputs/Descriptions.txt +1 -0
  90. spikezoo/archs/stir/configs/STIR.yml +37 -0
  91. spikezoo/archs/stir/configs/utils.py +155 -0
  92. spikezoo/archs/stir/configs/yml_parser.py +78 -0
  93. spikezoo/archs/stir/datasets/dataset_sreds.py +180 -0
  94. spikezoo/archs/stir/datasets/ds_utils.py +66 -0
  95. spikezoo/archs/stir/eval_SREDS.sh +5 -0
  96. spikezoo/archs/stir/main.py +397 -0
  97. spikezoo/archs/stir/metrics/losses.py +219 -0
  98. spikezoo/archs/stir/metrics/psnr.py +22 -0
  99. spikezoo/archs/stir/metrics/ssim.py +54 -0
  100. spikezoo/archs/stir/models/Vgg19.py +42 -0
  101. spikezoo/archs/stir/models/__pycache__/networks_STIR.cpython-39.pyc +0 -0
  102. spikezoo/archs/stir/models/__pycache__/submodules.cpython-39.pyc +0 -0
  103. spikezoo/archs/stir/models/__pycache__/transformer_new.cpython-39.pyc +0 -0
  104. spikezoo/archs/stir/models/networks_STIR.py +361 -0
  105. spikezoo/archs/stir/models/submodules.py +86 -0
  106. spikezoo/archs/stir/models/transformer_new.py +151 -0
  107. spikezoo/archs/stir/package_core/build/lib/package_core/__init__.py +0 -0
  108. spikezoo/archs/stir/package_core/build/lib/package_core/convertions.py +721 -0
  109. spikezoo/archs/stir/package_core/build/lib/package_core/disp_netS.py +133 -0
  110. spikezoo/archs/stir/package_core/build/lib/package_core/flow_utils.py +167 -0
  111. spikezoo/archs/stir/package_core/build/lib/package_core/generic_train_test.py +76 -0
  112. spikezoo/archs/stir/package_core/build/lib/package_core/geometry.py +458 -0
  113. spikezoo/archs/stir/package_core/build/lib/package_core/image_proc.py +183 -0
  114. spikezoo/archs/stir/package_core/build/lib/package_core/linalg.py +40 -0
  115. spikezoo/archs/stir/package_core/build/lib/package_core/losses.py +198 -0
  116. spikezoo/archs/stir/package_core/build/lib/package_core/metrics.py +51 -0
  117. spikezoo/archs/stir/package_core/build/lib/package_core/model_base.py +53 -0
  118. spikezoo/archs/stir/package_core/build/lib/package_core/net_basics.py +100 -0
  119. spikezoo/archs/stir/package_core/build/lib/package_core/resnet.py +333 -0
  120. spikezoo/archs/stir/package_core/build/lib/package_core/transforms.py +123 -0
  121. spikezoo/archs/stir/package_core/build/lib/package_core/utils.py +72 -0
  122. spikezoo/archs/stir/package_core/dist/package_core-0.0.0-py3.9.egg +0 -0
  123. spikezoo/archs/stir/package_core/package_core/__init__.py +0 -0
  124. spikezoo/archs/stir/package_core/package_core/__pycache__/__init__.cpython-39.pyc +0 -0
  125. spikezoo/archs/stir/package_core/package_core/__pycache__/net_basics.cpython-39.pyc +0 -0
  126. spikezoo/archs/stir/package_core/package_core/convertions.py +721 -0
  127. spikezoo/archs/stir/package_core/package_core/disp_netS.py +133 -0
  128. spikezoo/archs/stir/package_core/package_core/flow_utils.py +167 -0
  129. spikezoo/archs/stir/package_core/package_core/generic_train_test.py +76 -0
  130. spikezoo/archs/stir/package_core/package_core/geometry.py +458 -0
  131. spikezoo/archs/stir/package_core/package_core/image_proc.py +183 -0
  132. spikezoo/archs/stir/package_core/package_core/linalg.py +40 -0
  133. spikezoo/archs/stir/package_core/package_core/losses.py +198 -0
  134. spikezoo/archs/stir/package_core/package_core/metrics.py +51 -0
  135. spikezoo/archs/stir/package_core/package_core/model_base.py +53 -0
  136. spikezoo/archs/stir/package_core/package_core/net_basics.py +100 -0
  137. spikezoo/archs/stir/package_core/package_core/resnet.py +333 -0
  138. spikezoo/archs/stir/package_core/package_core/transforms.py +123 -0
  139. spikezoo/archs/stir/package_core/package_core/utils.py +72 -0
  140. spikezoo/archs/stir/package_core/package_core.egg-info/PKG-INFO +3 -0
  141. spikezoo/archs/stir/package_core/package_core.egg-info/SOURCES.txt +20 -0
  142. spikezoo/archs/stir/package_core/package_core.egg-info/dependency_links.txt +1 -0
  143. spikezoo/archs/stir/package_core/package_core.egg-info/top_level.txt +1 -0
  144. spikezoo/archs/stir/package_core/setup.py +5 -0
  145. spikezoo/archs/stir/requirements.txt +12 -0
  146. spikezoo/archs/stir/train_STIR.sh +9 -0
  147. spikezoo/archs/tfi/__pycache__/nets.cpython-39.pyc +0 -0
  148. spikezoo/archs/tfi/nets.py +43 -0
  149. spikezoo/archs/tfp/__pycache__/nets.cpython-39.pyc +0 -0
  150. spikezoo/archs/tfp/nets.py +13 -0
  151. spikezoo/archs/wgse/README.md +64 -0
  152. spikezoo/archs/wgse/__pycache__/dwtnets.cpython-39.pyc +0 -0
  153. spikezoo/archs/wgse/__pycache__/submodules.cpython-39.pyc +0 -0
  154. spikezoo/archs/wgse/dataset.py +59 -0
  155. spikezoo/archs/wgse/demo.png +0 -0
  156. spikezoo/archs/wgse/demo.py +83 -0
  157. spikezoo/archs/wgse/dwtnets.py +145 -0
  158. spikezoo/archs/wgse/eval.py +133 -0
  159. spikezoo/archs/wgse/logs/WGSE-Dwt1dNet-db8-5-ks3/log.txt +11 -0
  160. spikezoo/archs/wgse/submodules.py +68 -0
  161. spikezoo/archs/wgse/train.py +261 -0
  162. spikezoo/archs/wgse/transform.py +139 -0
  163. spikezoo/archs/wgse/utils.py +128 -0
  164. spikezoo/archs/wgse/weights/demo.png +0 -0
  165. spikezoo/data/base/test/gt/200_part1_key_id151.png +0 -0
  166. spikezoo/data/base/test/gt/200_part3_key_id151.png +0 -0
  167. spikezoo/data/base/test/gt/203_part1_key_id151.png +0 -0
  168. spikezoo/data/base/test/spike/200_part1_key_id151.dat +0 -0
  169. spikezoo/data/base/test/spike/200_part3_key_id151.dat +0 -0
  170. spikezoo/data/base/test/spike/203_part1_key_id151.dat +0 -0
  171. spikezoo/data/base/train/gt/203_part2_key_id151.png +0 -0
  172. spikezoo/data/base/train/gt/203_part3_key_id151.png +0 -0
  173. spikezoo/data/base/train/gt/203_part4_key_id151.png +0 -0
  174. spikezoo/data/base/train/spike/203_part2_key_id151.dat +0 -0
  175. spikezoo/data/base/train/spike/203_part3_key_id151.dat +0 -0
  176. spikezoo/data/base/train/spike/203_part4_key_id151.dat +0 -0
  177. spikezoo/datasets/base_dataset.py +2 -3
  178. spikezoo/metrics/__init__.py +1 -1
  179. spikezoo/models/base_model.py +1 -3
  180. spikezoo/pipeline/base_pipeline.py +7 -5
  181. spikezoo/pipeline/train_pipeline.py +1 -1
  182. spikezoo/utils/other_utils.py +16 -6
  183. spikezoo/utils/spike_utils.py +33 -29
  184. spikezoo/utils/vidar_loader.cpython-39-x86_64-linux-gnu.so +0 -0
  185. spikezoo-0.2.dist-info/METADATA +163 -0
  186. spikezoo-0.2.dist-info/RECORD +211 -0
  187. spikezoo/models/spcsnet_model.py +0 -19
  188. spikezoo-0.1.2.dist-info/METADATA +0 -39
  189. spikezoo-0.1.2.dist-info/RECORD +0 -36
  190. {spikezoo-0.1.2.dist-info → spikezoo-0.2.dist-info}/LICENSE.txt +0 -0
  191. {spikezoo-0.1.2.dist-info → spikezoo-0.2.dist-info}/WHEEL +0 -0
  192. {spikezoo-0.1.2.dist-info → spikezoo-0.2.dist-info}/top_level.txt +0 -0
spikezoo/archs/wgse/README.md
@@ -0,0 +1,64 @@
+ # WGSE-SpikeCamera
+
+ Code and datasets for **"Learning Temporal-ordered Representation for Spike Streams Based on Discrete Wavelet Transforms"**.
+
+ Jiyuan Zhang*, Shanshan Jia*, Zhaofei Yu $\dagger$ and Tiejun Huang
+
+ Thirty-Seventh AAAI Conference on Artificial Intelligence (**AAAI 2023**).
+ __________________________________________________
+ ## Code for WGSE
+
+ * **Key Requirements of the Code Environment**
+
+   * PyTorch > 1.7.0
+   * pywavelets
+   * pytorch-wavelets
+   * scikit-image
+   * einops
+
+ * **Train**
+
+   To train the WGSE network, use `train.py`.
+
+   The parameter list is:
+
+   ```python
+   parser = argparse.ArgumentParser(description='AAAI - WGSE - REDS')
+   parser.add_argument('-c', '--cuda', type=str, default='1', help='select gpu card')
+   parser.add_argument('-b', '--batch_size', type=int, default=16)
+   parser.add_argument('-e', '--epoch', type=int, default=600)
+   parser.add_argument('-w', '--wvl', type=str, default='db8', help='select wavelet base function')
+   parser.add_argument('-j', '--jlevels', type=int, default=5)
+   parser.add_argument('-k', '--kernel_size', type=int, default=3)
+   parser.add_argument('-l', '--logpath', type=str, default='WGSE-Dwt1dNet')
+   parser.add_argument('-r', '--resume_from', type=str, default=None)
+   parser.add_argument('--dataroot', type=str, default=None)
+   ```
+
+   `-c` is the CUDA device index, `-b` is the batch size, `-e` is the number of training epochs, `-w` is the wavelet base function, `-j` is the decomposition level, `-k` is the kernel size in the WGSE, `-r` is the folder of saved weights to load when resuming training, `--dataroot` is the root path of the dataset, and `-l` is the folder where the log files and model weights are saved.
+
+   In our implementation, the training command is:
+
+   ``` bash
+   python train.py -c 0 -b 16 -e 600 --dataroot "rootpath/of/dataset" -l "folder/for/save/logfiles"
+   ```
+
+ * **Test**
+
+   To test the WGSE network, use `demo.py`.
+
+   The parameter list is:
+
+   ```python
+   parser = argparse.ArgumentParser(description='AAAI - WGSE - REDS')
+   parser.add_argument('-w', '--wvl', type=str, default='db8', help='select wavelet base function')
+   parser.add_argument('-j', '--jlevels', type=int, default=5)
+   parser.add_argument('-k', '--kernel_size', type=int, default=3)
+   parser.add_argument('-l', '--logpath', type=str, default='WGSE-Dwt1dNet')
+   parser.add_argument('-f', '--datfile', type=str, default=None, help='path of the spike data to be tested')
+   ```
+
+   `-w` is the wavelet base function, `-j` is the decomposition level, `-k` is the kernel size in the WGSE, `-l` is the folder where the weight file is saved, and `-f` is the path of the `.dat` spike data to test (an example invocation is sketched below).
+
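By analogy with the training command above, a test run of `demo.py` might look like the following sketch; the wavelet settings match the argparse defaults, and the data path is a placeholder:

``` bash
# Sketch only: "path/to/spikes.dat" is a placeholder for your own spike recording.
python demo.py -w db8 -j 5 -k 3 -l "WGSE-Dwt1dNet" -f "path/to/spikes.dat"
```

Note that `demo.py` as shipped loads `model_best.pt` from the current working directory and writes its reconstruction to `demo.png`.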
+ ## The Dataset for WGSE
+ In this work, we propose "Spike-Cityscapes", a synthetic dataset for semantic segmentation built from spike streams generated from [Cityscapes](https://www.cityscapes-dataset.com/). The spike data are available for download at [https://pan.baidu.com/s/1lB4qpfZwaVN6WDFo5MRR-w](https://pan.baidu.com/s/1lB4qpfZwaVN6WDFo5MRR-w) with the password **svpg**.
spikezoo/archs/wgse/dataset.py
@@ -0,0 +1,59 @@
+ import glob
+ import os
+
+ import numpy as np
+ import torch
+ import torch.utils.data as data
+ from skimage import io
+
+ import utils
+
+
+ class DatasetREDS(data.Dataset):
+     def __init__(self, cfg, transform=None):
+         super(DatasetREDS, self).__init__()
+         self.cfg = cfg
+         self.rootfolder = cfg['rootfolder']
+         self.spikefolder = os.path.join(self.rootfolder, cfg['spikefolder'])
+         self.imagefolder = os.path.join(self.rootfolder, cfg['imagefolder'])
+
+         self.spike_list = os.listdir(self.spikefolder)
+         self.image_list = os.listdir(self.imagefolder)
+
+         self.H = int(cfg['H'])
+         self.W = int(cfg['W'])
+         self.C_SPIKE = int(cfg['C'])
+
+         self.transform = transform
+
+     def __getitem__(self, index: int):
+         item_name = self.spike_list[index][:-4]
+         spike_path = os.path.join(self.spikefolder, item_name+'.dat')
+         image_path = os.path.join(self.imagefolder, item_name+'.png')
+
+         path_grayframe = glob.glob(image_path)
+         gray_frame = io.imread(path_grayframe[0], as_gray=False).astype(np.float32)
+
+         gray_frame /= 255.0  # normalize
+         gray_frame = np.expand_dims(gray_frame, axis=0)  # expand to [1 x H x W]
+         gray_frame = torch.from_numpy(gray_frame)
+
+         f = open(spike_path, 'rb')
+         spike_seq = f.read()
+         spike_seq = np.frombuffer(spike_seq, 'b')
+         spikes = utils.RawToSpike(spike_seq, self.H, self.W)
+         spikes = spikes.astype(np.float32)
+         spikes = torch.from_numpy(spikes)
+         f.close()
+
+         if self.transform:
+             gray_frame, spikes = self.transform(gray_frame, spikes)
+
+         item = {}
+         item['spikes'] = spikes
+         item['image'] = gray_frame
+
+         return item
+
+     def __len__(self) -> int:
+         return len(self.spike_list)
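For orientation, a minimal sketch of how `DatasetREDS` is constructed and wrapped in a `DataLoader`; the configuration keys mirror the ones used in `eval.py` further below, and the dataset root is a placeholder:

```python
import torch
from dataset import DatasetREDS  # assuming the working directory is spikezoo/archs/wgse

# Hypothetical configuration; 'path/to/REDS' is a placeholder dataset root.
cfg = {
    'rootfolder': 'path/to/REDS/val',  # folder containing the 'input' and 'gt' subfolders
    'spikefolder': 'input',            # .dat spike streams
    'imagefolder': 'gt',               # ground-truth .png frames
    'H': 250, 'W': 400, 'C': 41,
}

test_set = DatasetREDS(cfg)
loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=1)

for item in loader:
    spikes = item['spikes']  # [B, T, H, W] float spike tensor (T depends on the .dat file)
    image = item['image']    # normalized ground-truth frame
    break
```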
Binary file (contents not shown)
spikezoo/archs/wgse/demo.py
@@ -0,0 +1,83 @@
+ import os
+
+ import numpy as np
+ import cv2
+ import torch
+ import argparse
+
+ from dwtnets import Dwt1dResnetX_TCN
+ from utils import RawToSpike
+ from einops import rearrange
+ from pytorch_wavelets import DWT1DForward
+
+
+ parser = argparse.ArgumentParser(description='AAAI - WGSE - REDS')
+ parser.add_argument('-w', '--wvl', type=str, default='db8', help='select wavelet base function')
+ parser.add_argument('-j', '--jlevels', type=int, default=5)
+ parser.add_argument('-k', '--kernel_size', type=int, default=3)
+ parser.add_argument('-l', '--logpath', type=str, default='WGSE-Dwt1dNet')
+ parser.add_argument('-f', '--datfile', type=str, default="/home/chenkang455/chenk/myproject/SpikeCLIP/selfcode/data/recVidarReal2019/classB/train-350kmh.dat", help='path of the spike data to be tested')
+
+ args = parser.parse_args()
+ dataroot = args.datfile
+ wvlname = args.wvl
+ j = args.jlevels
+ logfolder = args.logpath
+ ks = args.kernel_size
+
+
+ def nor(x):
+     return (x-np.min(x))/(np.max(x)-np.min(x))
+
+ def ensure_dir(path):
+     if not os.path.exists(path):
+         os.makedirs(path)
+
+ def progress_bar_time(total_time):
+     hour = int(total_time) // 3600
+     minu = (int(total_time) % 3600) // 60
+     sec = int(total_time) % 60
+     return '%d:%02d:%02d' % (hour, minu, sec)
+
+
+ def main():
+
+     f = open(dataroot, 'rb')
+     spike_seq = f.read()
+     spike_seq = np.frombuffer(spike_seq, 'b')
+     spikes = RawToSpike(spike_seq, 250, 400)
+     spikes = spikes.astype(np.float32)
+     spikes = torch.from_numpy(spikes)
+     f.close()
+
+     spikes = spikes[None, 130:171, :, :]
+
+     s = spikes[:, :, 0:1, 0:1]
+     dwt = DWT1DForward(wave=wvlname, J=j)
+     s_r = rearrange(s, 'b t h w -> b h w t')
+     s_r = rearrange(s_r, 'b h w t -> (b h w) 1 t')
+     yl, yh = dwt(s_r)
+     yl_size = yl.shape[-1]
+     yh_size = [yhi.shape[-1] for yhi in yh]
+     print(yl_size, yh_size)
+     model = Dwt1dResnetX_TCN(
+         wvlname=wvlname, J=j, yl_size=yl_size, yh_size=yh_size, num_residual_blocks=3, norm=None, ks=ks, store_features=True
+     )
+     print(model)
+
+     saved_state_dict = torch.load('model_best.pt')
+     model.load_state_dict(saved_state_dict.module.state_dict())
+
+     model = model.cuda()
+     model.eval()
+
+     pred = model(spikes.cuda())
+     prediction = pred[0].permute(1, 2, 0).cpu().detach().numpy()
+     print(os.path.join(logfolder, 'demo.png'))
+     cv2.imwrite('demo.png', prediction * 255.0)
+
+
+ if __name__ == '__main__':
+     main()
+
spikezoo/archs/wgse/dwtnets.py
@@ -0,0 +1,145 @@
+ import torch.nn as nn
+ from einops import rearrange
+ import os
+ import sys
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ sys.path.append(current_dir)
+
+ from pytorch_wavelets import DWT1DForward, DWT1DInverse
+
+ from submodules import ResidualBlock
+
+
+ class TcnResidualLayer(nn.Module):
+     def __init__(self, in_c, out_c, dilated=1, k=3, s=1, p=1, store_features=False):
+         super().__init__()
+         self.tcn0 = nn.Sequential(
+             nn.Conv1d(in_c, out_c, kernel_size=k, stride=s, padding=p, dilation=dilated),
+             nn.ReLU(),
+         )
+         self.tcn1 = nn.Sequential(
+             nn.Conv1d(out_c, out_c, kernel_size=k, stride=s, padding=p, dilation=dilated),
+         )
+         self.relu = nn.ReLU(inplace=False)
+         self.store_features = store_features
+         self.features = {}
+
+     def forward(self, x):
+         residual = x
+         out = self.tcn0(x)
+         if self.store_features:
+             self.features['after_tcn0'] = out
+         out = self.tcn1(out)
+         out = out + residual
+         out = self.relu(out)
+         return out
+
+
+ class Dwt1dModule_Tcn(nn.Module):
+     def __init__(
+         self,
+         wvlname='db1',
+         J=3,
+         yl_size=14,
+         yh_size=[26, 18, 14],
+         ks=3,
+         store_features=False
+     ):
+         super().__init__()
+         self.wvlname = wvlname
+         self.J = J
+         self.yl_num = yl_size
+         self.yh_num = yh_size
+         self.yh_blocks = nn.ModuleList()
+
+         self.store_features = store_features
+         self.features = {}
+
+         for i in self.yh_num:
+             self.yh_blocks.append(
+                 nn.Sequential(
+                     TcnResidualLayer(1, 32, store_features=store_features, k=ks, p=ks//2),
+                     nn.Conv1d(32, 1, kernel_size=ks, padding=ks//2, dilation=1),
+                     nn.ReLU(),
+                 )
+             )
+         self.yl_block = nn.Sequential(
+             TcnResidualLayer(1, 32, store_features=store_features, k=ks, p=ks//2),
+             nn.Conv1d(32, 1, kernel_size=ks, padding=ks//2, dilation=1),
+             nn.ReLU(),
+         )
+         self.dwt = DWT1DForward(wave=self.wvlname, J=self.J)
+         self.idwt = DWT1DInverse(wave=self.wvlname)
+
+     def forward(self, x):
+         B, T, H, W = x.shape
+         x_r = rearrange(x, 'b t h w -> b h w t')
+         x_r = rearrange(x_r, 'b h w t -> (b h w) 1 t')
+
+         yl, yh = self.dwt(x_r)
+         yl_out = self.yl_block(yl)
+         yh_out = []
+         for i, yhi in enumerate(yh):
+             yhi_out = self.yh_blocks[i](yhi)
+             yh_out.append(yhi_out)
+
+         out = self.idwt((yl_out, yh_out))
+         out = rearrange(out, '(b h w) 1 t -> b h w t', b=B, h=H, w=W)
+         out = rearrange(out, 'b h w t -> b t h w')
+
+         return out
+
+
+ class Dwt1dResnetX_TCN(nn.Module):
+     def __init__(
+         self,
+         wvlname='db1',
+         J=3,
+         yl_size=14,
+         yh_size=[26, 18, 14],
+         num_residual_blocks=2,
+         norm=None,
+         inc=41,
+         ks=3,
+         store_features=False
+     ):
+         super().__init__()
+
+         self.wvl = Dwt1dModule_Tcn(wvlname, J, yl_size, yh_size, store_features=store_features, ks=ks)
+
+         self.norm = norm
+         self.num_residual_blocks = num_residual_blocks
+         self.resblocks = nn.ModuleList()
+         for _ in range(self.num_residual_blocks):
+             self.resblocks.append(ResidualBlock(256, 256, norm=self.norm))
+
+         self.conv = nn.Sequential(
+             nn.Conv2d(inc if inc%2==0 else inc+1, 256, kernel_size=3, padding=1, bias=True),
+             nn.ReLU(),
+         )
+
+         self.tail = nn.Sequential(
+             nn.Conv2d(256, 64, kernel_size=3, padding=1, bias=True),
+             nn.ReLU(),
+             nn.Conv2d(64, 1, kernel_size=3, padding=1, bias=True),
+             nn.ReLU(),
+         )
+
+         self.store_features = store_features
+         self.features = {}
+
+     def forward(self, x):
+         y = self.wvl(x)
+
+         y = self.conv(y)
+
+         for resi, resblock in enumerate(self.resblocks):
+             y = resblock(y)
+
+         out = self.tail(y)
+
+         return out
spikezoo/archs/wgse/eval.py
@@ -0,0 +1,133 @@
+ import os
+ import time
+
+ import numpy as np
+ import cv2
+ import torch
+ import argparse
+
+ from dataset import DatasetREDS
+ from dwtnets import Dwt1dResnetX_TCN
+ from utils import calculate_psnr, calculate_ssim
+ from einops import rearrange
+ from pytorch_wavelets import DWT1DForward
+
+
+ parser = argparse.ArgumentParser(description='AAAI - WGSE - REDS')
+ parser.add_argument('-w', '--wvl', type=str, default='db8', help='select wavelet base function')
+ parser.add_argument('-j', '--jlevels', type=int, default=5)
+ parser.add_argument('-k', '--kernel_size', type=int, default=3)
+ parser.add_argument('-l', '--logpath', type=str, default='WGSE-Dwt1dNet')
+ parser.add_argument('--dataroot', type=str, default=None)
+
+ args = parser.parse_args()
+ dataroot = args.dataroot
+ wvlname = args.wvl
+ j = args.jlevels
+ logfolder = args.logpath
+ ks = args.kernel_size
+
+
+ def nor(x):
+     return (x-np.min(x))/(np.max(x)-np.min(x))
+
+ def save_wvl(wvl, savefolder, saveprefix):
+     wvl = wvl.squeeze()
+     t, h, w = wvl.shape
+     wvl = nor(wvl)
+
+     for i in range(t):
+         wvl_t = wvl[i] * 255
+         cv2.imwrite(os.path.join(savefolder, saveprefix+'_{:03d}.png'.format(i)), wvl_t)
+
+ def ensure_dir(path):
+     if not os.path.exists(path):
+         os.makedirs(path)
+
+ def progress_bar_time(total_time):
+     hour = int(total_time) // 3600
+     minu = (int(total_time) % 3600) // 60
+     sec = int(total_time) % 60
+     return '%d:%02d:%02d' % (hour, minu, sec)
+
+
+ def main():
+     cfg = {}
+     cfg['rootfolder'] = os.path.join(dataroot, 'val')
+     cfg['spikefolder'] = 'input'
+     cfg['imagefolder'] = 'gt'
+     cfg['H'] = 250
+     cfg['W'] = 400
+     cfg['C'] = 41
+     test_set = DatasetREDS(cfg)
+
+     test_data_loader = torch.utils.data.DataLoader(
+         dataset=test_set,
+         batch_size=1,
+         shuffle=False,
+         num_workers=1,
+         drop_last=False)
+
+     item0 = test_set[0]
+     s = item0['spikes']
+     s = s[None, :, 0:1, 0:1]
+     dwt = DWT1DForward(wave=wvlname, J=j)
+     B, T, H, W = s.shape
+     s_r = rearrange(s, 'b t h w -> b h w t')
+     s_r = rearrange(s_r, 'b h w t -> (b h w) 1 t')
+     yl, yh = dwt(s_r)
+     yl_size = yl.shape[-1]
+     yh_size = [yhi.shape[-1] for yhi in yh]
+
+     model = Dwt1dResnetX_TCN(
+         wvlname=wvlname, J=j, yl_size=yl_size, yh_size=yh_size, num_residual_blocks=3, norm=None, ks=ks, store_features=True
+     )
+     print(model)
+
+     saved_state_dict = torch.load(logfolder + '/model_best.pt')
+     model.load_state_dict(saved_state_dict.module.state_dict())
+
+     model = model.cuda()
+     # model = torch.nn.DataParallel(model).cuda()
+
+     model.eval()
+
+     with torch.no_grad():
+         sum_ssim = 0.0
+         sum_psnr = 0.0
+         sum_num = 0
+         total_time = 0
+         for i, item in enumerate(test_data_loader):
+             start_time = time.time()
+
+             spikes = item['spikes'][:, 130:171, :, :].cuda()
+             image = item['image'].cuda()
+
+             pred = model(spikes)
+
+             prediction = pred[0].permute(1, 2, 0).cpu().numpy()
+             gt = image[0].permute(1, 2, 0).cpu().numpy()
+
+             sum_ssim += calculate_ssim(gt * 255.0, prediction * 255.0)
+             sum_psnr += calculate_psnr(gt * 255.0, prediction * 255.0)
+             sum_num += 1
+             elapse_time = time.time() - start_time
+             total_time += elapse_time
+
+             print('\r[evaluating] %3.2f%% | %6d/%6d [%s<%s, %.2fs/it]' % (
+                 float(i + 1) / int(len(test_data_loader)) * 100, i + 1, int(len(test_data_loader)),
+                 progress_bar_time(total_time),
+                 progress_bar_time(total_time / (i + 1) * int(len(test_data_loader))),
+                 total_time / (i + 1)), end='')
+
+         sum_psnr /= sum_num
+         sum_ssim /= sum_num
+
+         print('')
+         print('\r[Evaluation Result] PSNR: %.4f | SSIM: %.4f' % (sum_psnr, sum_ssim))
+
+
+ if __name__ == '__main__':
+     main()
+
spikezoo/archs/wgse/logs/WGSE-Dwt1dNet-db8-5-ks3/log.txt
@@ -0,0 +1,11 @@
+ [training] 0.50% | 1/ 200 [0:00:03<0:11:10, 3.35s/it] | LOSS: 0.4502 | LR: 0.0001
+ [training] 1.00% | 2/ 200 [0:00:03<0:05:38, 1.69s/it] | LOSS: 0.3112 | LR: 0.0001
+ [training] 1.50% | 3/ 200 [0:00:03<0:03:48, 1.14s/it] | LOSS: 0.2487 | LR: 0.0001
+ [training] 2.00% | 4/ 200 [0:00:03<0:02:53, 0.87s/it] | LOSS: 0.1424 | LR: 0.0001
+ [training] 2.50% | 5/ 200 [0:00:03<0:02:20, 0.70s/it] | LOSS: 0.2075 | LR: 0.0001
+ [training] 3.00% | 6/ 200 [0:00:03<0:01:58, 0.59s/it] | LOSS: 0.2425 | LR: 0.0001
+ [training] 3.50% | 7/ 200 [0:00:03<0:01:42, 0.51s/it] | LOSS: 0.2178 | LR: 0.0001
+ [training] 4.00% | 8/ 200 [0:00:03<0:01:30, 0.45s/it] | LOSS: 0.1428 | LR: 0.0001
+ [training] 4.50% | 9/ 200 [0:00:03<0:01:21, 0.41s/it] | LOSS: 0.1512 | LR: 0.0001
+ [training] 5.00% | 10/ 200 [0:00:03<0:01:14, 0.37s/it] | LOSS: 0.1258 | LR: 0.0001
+ [training] 5.50% | 11/ 200 [0:00:03<0:01:08, 0.34s/it] | LOSS: 0.1700 | LR: 0.0001
spikezoo/archs/wgse/submodules.py
@@ -0,0 +1,68 @@
+ import torch
+ import torch.nn as nn
+
+
+ class ConvLayer(nn.Module):
+     def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, activation='relu', norm=None,
+                  BN_momentum=0.1):
+         super(ConvLayer, self).__init__()
+
+         bias = False if norm == 'BN' else True
+         self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
+         if activation is not None:
+             self.activation = getattr(torch, activation)
+         else:
+             self.activation = None
+
+         self.norm = norm
+         if norm == 'BN':
+             self.norm_layer = nn.BatchNorm2d(out_channels, momentum=BN_momentum)
+         elif norm == 'IN':
+             self.norm_layer = nn.InstanceNorm2d(out_channels, track_running_stats=True)
+
+     def forward(self, x):
+         out = self.conv2d(x)
+
+         if self.norm in ['BN', 'IN']:
+             out = self.norm_layer(out)
+
+         if self.activation is not None:
+             out = self.activation(out)
+
+         return out
+
+
+ class ResidualBlock(nn.Module):
+     def __init__(self, in_channels, out_channels, stride=1, downsample=None, norm=None,
+                  BN_momentum=0.1):
+         super(ResidualBlock, self).__init__()
+         bias = False if norm == 'BN' else True
+         self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=bias)
+         self.norm = norm
+         if norm == 'BN':
+             self.bn1 = nn.BatchNorm2d(out_channels, momentum=BN_momentum)
+             self.bn2 = nn.BatchNorm2d(out_channels, momentum=BN_momentum)
+         elif norm == 'IN':
+             self.bn1 = nn.InstanceNorm2d(out_channels)
+             self.bn2 = nn.InstanceNorm2d(out_channels)
+
+         self.relu = nn.ReLU(inplace=False)
+         self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=bias)
+         self.downsample = downsample
+
+     def forward(self, x):
+         residual = x
+         out = self.conv1(x)
+         if self.norm in ['BN', 'IN']:
+             out = self.bn1(out)
+         out = self.relu(out)
+         out = self.conv2(out)
+         if self.norm in ['BN', 'IN']:
+             out = self.bn2(out)
+
+         if self.downsample:
+             residual = self.downsample(x)
+
+         out += residual
+         out = self.relu(out)
+         return out