spikezoo 0.1.2__py3-none-any.whl → 0.2.1__py3-none-any.whl

Files changed (192)
  1. spikezoo/__init__.py +13 -0
  2. spikezoo/archs/__pycache__/__init__.cpython-39.pyc +0 -0
  3. spikezoo/archs/base/__pycache__/nets.cpython-39.pyc +0 -0
  4. spikezoo/archs/base/nets.py +34 -0
  5. spikezoo/archs/bsf/README.md +92 -0
  6. spikezoo/archs/bsf/datasets/datasets.py +328 -0
  7. spikezoo/archs/bsf/datasets/ds_utils.py +64 -0
  8. spikezoo/archs/bsf/main.py +398 -0
  9. spikezoo/archs/bsf/metrics/psnr.py +22 -0
  10. spikezoo/archs/bsf/metrics/ssim.py +54 -0
  11. spikezoo/archs/bsf/models/bsf/__pycache__/align.cpython-39.pyc +0 -0
  12. spikezoo/archs/bsf/models/bsf/__pycache__/bsf.cpython-39.pyc +0 -0
  13. spikezoo/archs/bsf/models/bsf/__pycache__/rep.cpython-39.pyc +0 -0
  14. spikezoo/archs/bsf/models/bsf/align.py +154 -0
  15. spikezoo/archs/bsf/models/bsf/bsf.py +105 -0
  16. spikezoo/archs/bsf/models/bsf/dsft_convert.py +96 -0
  17. spikezoo/archs/bsf/models/bsf/rep.py +44 -0
  18. spikezoo/archs/bsf/models/get_model.py +7 -0
  19. spikezoo/archs/bsf/prepare_data/DSFT.py +62 -0
  20. spikezoo/archs/bsf/prepare_data/crop_dataset_train.py +135 -0
  21. spikezoo/archs/bsf/prepare_data/crop_dataset_val.py +139 -0
  22. spikezoo/archs/bsf/prepare_data/crop_train.sh +4 -0
  23. spikezoo/archs/bsf/prepare_data/crop_val.sh +4 -0
  24. spikezoo/archs/bsf/prepare_data/io_utils.py +64 -0
  25. spikezoo/archs/bsf/requirements.txt +9 -0
  26. spikezoo/archs/bsf/test.py +16 -0
  27. spikezoo/archs/bsf/utils.py +154 -0
  28. spikezoo/archs/spikeclip/__pycache__/nets.cpython-39.pyc +0 -0
  29. spikezoo/archs/spikeclip/nets.py +40 -0
  30. spikezoo/archs/spikeformer/CheckPoints/readme +1 -0
  31. spikezoo/archs/spikeformer/DataProcess/DataExtactor.py +60 -0
  32. spikezoo/archs/spikeformer/DataProcess/DataLoader.py +115 -0
  33. spikezoo/archs/spikeformer/DataProcess/LoadSpike.py +39 -0
  34. spikezoo/archs/spikeformer/EvalResults/readme +1 -0
  35. spikezoo/archs/spikeformer/LICENSE +21 -0
  36. spikezoo/archs/spikeformer/Metrics/Metrics.py +50 -0
  37. spikezoo/archs/spikeformer/Metrics/__init__.py +0 -0
  38. spikezoo/archs/spikeformer/Model/Loss.py +89 -0
  39. spikezoo/archs/spikeformer/Model/SpikeFormer.py +230 -0
  40. spikezoo/archs/spikeformer/Model/__init__.py +0 -0
  41. spikezoo/archs/spikeformer/Model/__pycache__/SpikeFormer.cpython-39.pyc +0 -0
  42. spikezoo/archs/spikeformer/Model/__pycache__/__init__.cpython-39.pyc +0 -0
  43. spikezoo/archs/spikeformer/README.md +30 -0
  44. spikezoo/archs/spikeformer/evaluate.py +87 -0
  45. spikezoo/archs/spikeformer/recon_real_data.py +97 -0
  46. spikezoo/archs/spikeformer/requirements.yml +95 -0
  47. spikezoo/archs/spikeformer/train.py +173 -0
  48. spikezoo/archs/spikeformer/utils.py +22 -0
  49. spikezoo/archs/spk2imgnet/.github/workflows/pylint.yml +23 -0
  50. spikezoo/archs/spk2imgnet/.gitignore +150 -0
  51. spikezoo/archs/spk2imgnet/DCNv2.py +135 -0
  52. spikezoo/archs/spk2imgnet/__pycache__/DCNv2.cpython-39.pyc +0 -0
  53. spikezoo/archs/spk2imgnet/__pycache__/align_arch.cpython-39.pyc +0 -0
  54. spikezoo/archs/spk2imgnet/__pycache__/nets.cpython-39.pyc +0 -0
  55. spikezoo/archs/spk2imgnet/align_arch.py +159 -0
  56. spikezoo/archs/spk2imgnet/dataset.py +144 -0
  57. spikezoo/archs/spk2imgnet/nets.py +230 -0
  58. spikezoo/archs/spk2imgnet/readme.md +86 -0
  59. spikezoo/archs/spk2imgnet/test_gen_imgseq.py +118 -0
  60. spikezoo/archs/spk2imgnet/train.py +189 -0
  61. spikezoo/archs/spk2imgnet/utils.py +64 -0
  62. spikezoo/archs/ssir/README.md +87 -0
  63. spikezoo/archs/ssir/configs/SSIR.yml +37 -0
  64. spikezoo/archs/ssir/configs/yml_parser.py +78 -0
  65. spikezoo/archs/ssir/datasets/dataset_sreds.py +170 -0
  66. spikezoo/archs/ssir/datasets/ds_utils.py +66 -0
  67. spikezoo/archs/ssir/losses.py +21 -0
  68. spikezoo/archs/ssir/main.py +326 -0
  69. spikezoo/archs/ssir/metrics/psnr.py +22 -0
  70. spikezoo/archs/ssir/metrics/ssim.py +54 -0
  71. spikezoo/archs/ssir/models/Vgg19.py +42 -0
  72. spikezoo/archs/ssir/models/__pycache__/layers.cpython-39.pyc +0 -0
  73. spikezoo/archs/ssir/models/__pycache__/networks.cpython-39.pyc +0 -0
  74. spikezoo/archs/ssir/models/layers.py +110 -0
  75. spikezoo/archs/ssir/models/networks.py +61 -0
  76. spikezoo/archs/ssir/requirements.txt +8 -0
  77. spikezoo/archs/ssir/shells/eval_SREDS.sh +6 -0
  78. spikezoo/archs/ssir/shells/train_SSIR.sh +12 -0
  79. spikezoo/archs/ssir/test.py +3 -0
  80. spikezoo/archs/ssir/utils.py +154 -0
  81. spikezoo/archs/ssml/__pycache__/cbam.cpython-39.pyc +0 -0
  82. spikezoo/archs/ssml/__pycache__/model.cpython-39.pyc +0 -0
  83. spikezoo/archs/ssml/cbam.py +224 -0
  84. spikezoo/archs/ssml/model.py +290 -0
  85. spikezoo/archs/ssml/res.png +0 -0
  86. spikezoo/archs/ssml/test.py +67 -0
  87. spikezoo/archs/stir/.git-credentials +0 -0
  88. spikezoo/archs/stir/README.md +65 -0
  89. spikezoo/archs/stir/ckpt_outputs/Descriptions.txt +1 -0
  90. spikezoo/archs/stir/configs/STIR.yml +37 -0
  91. spikezoo/archs/stir/configs/utils.py +155 -0
  92. spikezoo/archs/stir/configs/yml_parser.py +78 -0
  93. spikezoo/archs/stir/datasets/dataset_sreds.py +180 -0
  94. spikezoo/archs/stir/datasets/ds_utils.py +66 -0
  95. spikezoo/archs/stir/eval_SREDS.sh +5 -0
  96. spikezoo/archs/stir/main.py +397 -0
  97. spikezoo/archs/stir/metrics/losses.py +219 -0
  98. spikezoo/archs/stir/metrics/psnr.py +22 -0
  99. spikezoo/archs/stir/metrics/ssim.py +54 -0
  100. spikezoo/archs/stir/models/Vgg19.py +42 -0
  101. spikezoo/archs/stir/models/__pycache__/networks_STIR.cpython-39.pyc +0 -0
  102. spikezoo/archs/stir/models/__pycache__/submodules.cpython-39.pyc +0 -0
  103. spikezoo/archs/stir/models/__pycache__/transformer_new.cpython-39.pyc +0 -0
  104. spikezoo/archs/stir/models/networks_STIR.py +361 -0
  105. spikezoo/archs/stir/models/submodules.py +86 -0
  106. spikezoo/archs/stir/models/transformer_new.py +151 -0
  107. spikezoo/archs/stir/package_core/build/lib/package_core/__init__.py +0 -0
  108. spikezoo/archs/stir/package_core/build/lib/package_core/convertions.py +721 -0
  109. spikezoo/archs/stir/package_core/build/lib/package_core/disp_netS.py +133 -0
  110. spikezoo/archs/stir/package_core/build/lib/package_core/flow_utils.py +167 -0
  111. spikezoo/archs/stir/package_core/build/lib/package_core/generic_train_test.py +76 -0
  112. spikezoo/archs/stir/package_core/build/lib/package_core/geometry.py +458 -0
  113. spikezoo/archs/stir/package_core/build/lib/package_core/image_proc.py +183 -0
  114. spikezoo/archs/stir/package_core/build/lib/package_core/linalg.py +40 -0
  115. spikezoo/archs/stir/package_core/build/lib/package_core/losses.py +198 -0
  116. spikezoo/archs/stir/package_core/build/lib/package_core/metrics.py +51 -0
  117. spikezoo/archs/stir/package_core/build/lib/package_core/model_base.py +53 -0
  118. spikezoo/archs/stir/package_core/build/lib/package_core/net_basics.py +100 -0
  119. spikezoo/archs/stir/package_core/build/lib/package_core/resnet.py +333 -0
  120. spikezoo/archs/stir/package_core/build/lib/package_core/transforms.py +123 -0
  121. spikezoo/archs/stir/package_core/build/lib/package_core/utils.py +72 -0
  122. spikezoo/archs/stir/package_core/dist/package_core-0.0.0-py3.9.egg +0 -0
  123. spikezoo/archs/stir/package_core/package_core/__init__.py +0 -0
  124. spikezoo/archs/stir/package_core/package_core/__pycache__/__init__.cpython-39.pyc +0 -0
  125. spikezoo/archs/stir/package_core/package_core/__pycache__/net_basics.cpython-39.pyc +0 -0
  126. spikezoo/archs/stir/package_core/package_core/convertions.py +721 -0
  127. spikezoo/archs/stir/package_core/package_core/disp_netS.py +133 -0
  128. spikezoo/archs/stir/package_core/package_core/flow_utils.py +167 -0
  129. spikezoo/archs/stir/package_core/package_core/generic_train_test.py +76 -0
  130. spikezoo/archs/stir/package_core/package_core/geometry.py +458 -0
  131. spikezoo/archs/stir/package_core/package_core/image_proc.py +183 -0
  132. spikezoo/archs/stir/package_core/package_core/linalg.py +40 -0
  133. spikezoo/archs/stir/package_core/package_core/losses.py +198 -0
  134. spikezoo/archs/stir/package_core/package_core/metrics.py +51 -0
  135. spikezoo/archs/stir/package_core/package_core/model_base.py +53 -0
  136. spikezoo/archs/stir/package_core/package_core/net_basics.py +100 -0
  137. spikezoo/archs/stir/package_core/package_core/resnet.py +333 -0
  138. spikezoo/archs/stir/package_core/package_core/transforms.py +123 -0
  139. spikezoo/archs/stir/package_core/package_core/utils.py +72 -0
  140. spikezoo/archs/stir/package_core/package_core.egg-info/PKG-INFO +3 -0
  141. spikezoo/archs/stir/package_core/package_core.egg-info/SOURCES.txt +20 -0
  142. spikezoo/archs/stir/package_core/package_core.egg-info/dependency_links.txt +1 -0
  143. spikezoo/archs/stir/package_core/package_core.egg-info/top_level.txt +1 -0
  144. spikezoo/archs/stir/package_core/setup.py +5 -0
  145. spikezoo/archs/stir/requirements.txt +12 -0
  146. spikezoo/archs/stir/train_STIR.sh +9 -0
  147. spikezoo/archs/tfi/__pycache__/nets.cpython-39.pyc +0 -0
  148. spikezoo/archs/tfi/nets.py +43 -0
  149. spikezoo/archs/tfp/__pycache__/nets.cpython-39.pyc +0 -0
  150. spikezoo/archs/tfp/nets.py +13 -0
  151. spikezoo/archs/wgse/README.md +64 -0
  152. spikezoo/archs/wgse/__pycache__/dwtnets.cpython-39.pyc +0 -0
  153. spikezoo/archs/wgse/__pycache__/submodules.cpython-39.pyc +0 -0
  154. spikezoo/archs/wgse/dataset.py +59 -0
  155. spikezoo/archs/wgse/demo.png +0 -0
  156. spikezoo/archs/wgse/demo.py +83 -0
  157. spikezoo/archs/wgse/dwtnets.py +145 -0
  158. spikezoo/archs/wgse/eval.py +133 -0
  159. spikezoo/archs/wgse/logs/WGSE-Dwt1dNet-db8-5-ks3/log.txt +11 -0
  160. spikezoo/archs/wgse/submodules.py +68 -0
  161. spikezoo/archs/wgse/train.py +261 -0
  162. spikezoo/archs/wgse/transform.py +139 -0
  163. spikezoo/archs/wgse/utils.py +128 -0
  164. spikezoo/archs/wgse/weights/demo.png +0 -0
  165. spikezoo/data/base/test/gt/200_part1_key_id151.png +0 -0
  166. spikezoo/data/base/test/gt/200_part3_key_id151.png +0 -0
  167. spikezoo/data/base/test/gt/203_part1_key_id151.png +0 -0
  168. spikezoo/data/base/test/spike/200_part1_key_id151.dat +0 -0
  169. spikezoo/data/base/test/spike/200_part3_key_id151.dat +0 -0
  170. spikezoo/data/base/test/spike/203_part1_key_id151.dat +0 -0
  171. spikezoo/data/base/train/gt/203_part2_key_id151.png +0 -0
  172. spikezoo/data/base/train/gt/203_part3_key_id151.png +0 -0
  173. spikezoo/data/base/train/gt/203_part4_key_id151.png +0 -0
  174. spikezoo/data/base/train/spike/203_part2_key_id151.dat +0 -0
  175. spikezoo/data/base/train/spike/203_part3_key_id151.dat +0 -0
  176. spikezoo/data/base/train/spike/203_part4_key_id151.dat +0 -0
  177. spikezoo/datasets/base_dataset.py +2 -3
  178. spikezoo/metrics/__init__.py +1 -1
  179. spikezoo/models/base_model.py +1 -3
  180. spikezoo/pipeline/base_pipeline.py +7 -5
  181. spikezoo/pipeline/train_pipeline.py +1 -1
  182. spikezoo/utils/other_utils.py +16 -6
  183. spikezoo/utils/spike_utils.py +33 -29
  184. spikezoo/utils/vidar_loader.cpython-39-x86_64-linux-gnu.so +0 -0
  185. spikezoo-0.2.1.dist-info/METADATA +167 -0
  186. spikezoo-0.2.1.dist-info/RECORD +211 -0
  187. spikezoo/models/spcsnet_model.py +0 -19
  188. spikezoo-0.1.2.dist-info/METADATA +0 -39
  189. spikezoo-0.1.2.dist-info/RECORD +0 -36
  190. {spikezoo-0.1.2.dist-info → spikezoo-0.2.1.dist-info}/LICENSE.txt +0 -0
  191. {spikezoo-0.1.2.dist-info → spikezoo-0.2.1.dist-info}/WHEEL +0 -0
  192. {spikezoo-0.1.2.dist-info → spikezoo-0.2.1.dist-info}/top_level.txt +0 -0

spikezoo/archs/bsf/main.py
@@ -0,0 +1,398 @@
+ import argparse
+ import os
+ import os.path as osp
+ import time
+ import numpy as np
+ import torch
+ import torch.backends.cudnn as cudnn
+ from tensorboardX import SummaryWriter
+ import datetime
+ from datasets import datasets
+ from models.get_model import get_model
+ from utils import *
+ from metrics.psnr import *
+ from metrics.ssim import *
+ import lpips
+ from skimage.metrics import peak_signal_noise_ratio as compare_psnr
+ import pprint
+ from models.bsf.dsft_convert import convert_dsft4
+
+ parser = argparse.ArgumentParser()
+ ############################ Dataset Root ############################
+ parser.add_argument('--dataset_storage', type=str, default='ram') ## ram or disk
+ parser.add_argument('--data-root', type=str, default='/dev/shm/rzhao/REDS120fps')
+ parser.add_argument('--half_reserve', type=int, default=2, help=' DSFT half reserve + 3ref + 4key + 3ref + DSFT half reserve')
+ ############################ Training Params ############################
+ parser.add_argument('--arch', '-a', type=str, default='MEPF')
+ parser.add_argument('--batch-size', '-bs', type=int, default=8)
+ parser.add_argument('--learning-rate', '-lr', type=float, default=2e-4)
+ parser.add_argument('--train-res', '-tr', type=int, default=[128, 128], metavar='N', nargs='*')
+ parser.add_argument('--input-type', type=str, default='raw_spike', choices=['dsft', 'raw_spike'])
+ parser.add_argument('--epochs', '-ep', type=int, default=100)
+ parser.add_argument('--workers', '-j', type=int, default=8)
+ parser.add_argument('--pretrained', '-prt', type=str, default=None)
+ parser.add_argument('--start-epoch', '-sep', type=int, default=0)
+ parser.add_argument('--print-freq', '-pf', type=int, default=100)
+ parser.add_argument('--save-dir', '-sd', type=str, default='outputs')
+ parser.add_argument('--save-name', '-sn', type=str, default=None)
+ parser.add_argument('--vis-path', '-vp', type=str, default='vis')
+ parser.add_argument('--vis-name', '-vn', type=str, default='model1')
+ parser.add_argument('--eval-path', '-evp', type=str, default='eval_vis/model1')
+ parser.add_argument('--vis-freq', '-vf', type=int, default=20)
+ parser.add_argument('--eval', '-e', action='store_true')
+ parser.add_argument('--print_details', '-pd', action='store_true')
+ parser.add_argument('--milestones', default=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100], metavar='N', nargs='*')
+ parser.add_argument('--lr-scale-factor', '-lrsf', type=float, default=0.7)
+ parser.add_argument('--eval-interval', '-ei', type=int, default=5)
+ parser.add_argument('--no_imwrite', action='store_true', default=False)
+ parser.add_argument('--compile_model', '-cmpmd', action='store_true')
+ parser.add_argument('--seed', type=int, default=2728)
+ ############################ Params about Dataset ############################
+ parser.add_argument('--alpha', type=float, default=0.7)
+ parser.add_argument('--eta_list', default=[1.00, 0.75, 0.50], type=float, metavar='N', nargs='*')
+ parser.add_argument('--gamma', type=int, default=60)
+ ############################ About Optimizer ############################
+ parser.add_argument('--solver', type=str, default='Adam')
+ parser.add_argument('--momentum', type=float, default=0.9)
+ parser.add_argument('--beta', type=float, default=0.999)
+ parser.add_argument('--weight_decay', type=float, default=0.0)
+
+ parser.add_argument('--test_eval', action='store_true')
+ parser.add_argument('--logs_file_name', type=str, default='bsf')
+
+ parser.add_argument('--loss_type', type=str, default='l1')
+ parser.add_argument('--dsft_convertor_type', type=int, default=4)
+
+ parser.add_argument('--no_dsft', action='store_true')
+ args = parser.parse_args()
+
+
+
+ ##########################################################################################################
+ ## configs
+ writer_root = 'logs/{:s}/'.format(args.logs_file_name)
+ os.makedirs(writer_root, exist_ok=True)
+ writer_path = writer_root + args.arch + '.txt'
+ writer = open(writer_path, 'a')
+
+ for k, v in vars(args).items():
+     vv = pprint.pformat(v)
+     ostr = '{:s} : {:s}'.format(k, vv)
+     writer.write(ostr + '\n')
+
+ args.milestones = [int(m) for m in args.milestones]
+ ostr = 'milsones '
+ for mmm in args.milestones:
+     ostr += '{:d} '.format(mmm)
+ writer.write(ostr + '\n')
+
+ n_iter = 0
+
+
+ def batch_PSNR(img, imclean, data_range):
+     Img = img.data.cpu().numpy().astype(np.float32)
+     Iclean = imclean.data.cpu().numpy().astype(np.float32)
+     PSNR = compare_psnr(Img, Iclean, data_range=data_range)
+     '''
+     PSNR = 0
+     for i in range(Img.shape[0]):
+         PSNR += compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range)
+     return (PSNR/Img.shape[0])
+     '''
+     return PSNR
+
+
+ def train(args, train_loader, model, optimizer, epoch, train_writer):
+     ######################################################################
+     ## Init
+     global n_iter
+     batch_time = AverageMeter(precision=3)
+     data_time = AverageMeter(precision=3)
+
+     losses = AverageMeter(precision=6, names=['Loss'])
+     batch_psnr = AverageMeter(precision=4)
+
+     model.train()
+
+     end = time.time()
+
+     ######################################################################
+     ## Training Loop
+     for ww, data in enumerate(train_loader, 0):
+         spikes = [spk.float().cuda() for spk in data['spikes']]
+         spks = torch.cat(spikes, dim=1)
+         central_idx = 10*args.half_reserve + 30
+         spks = spks[:, central_idx-30:central_idx+31]
+
+         if not args.no_dsft:
+             dsfts = [d.float().cuda() for d in data['dsft']]
+             dsfts = torch.cat(dsfts, dim=1)
+             central_idx = 10*args.half_reserve + 30
+             dsfts = dsfts[:, central_idx-30:central_idx+31]
+
+         images = [img.cuda() for img in data['images']]
+         norm_fac = data['norm_fac'].unsqueeze_(dim=1).unsqueeze_(dim=1).unsqueeze_(dim=1).cuda().float()
+
+         data_time.update(time.time() - end)
+
+         if not args.no_dsft:
+             dsft_dict = convert_dsft4(spike=spks, dsft=dsfts)
+
+         input_dict = {
+             'dsft_dict': dsft_dict,
+             'spikes': spks,
+         }
+
+         gt = images[0]
+
+         rec = model(input_dict=input_dict)
+
+         rec = rec / norm_fac
+
+         if args.loss_type == 'l1':
+             loss = (rec - gt).abs().mean()
+         elif args.loss_type == 'charbonnier':
+             loss = torch.sqrt((rec - gt)**2 + 1e-6).mean()
+
+         # record loss
+         losses.update(loss)
+         cur_batch_psnr = batch_PSNR(img=rec, imclean=gt, data_range=1.0)
+         batch_psnr.update(cur_batch_psnr)
+
+         if ww % 10 == 0:
+             train_writer.add_scalar('loss', loss.item(), n_iter)
+             train_writer.add_scalar('batch_psnr', cur_batch_psnr, n_iter)
+
+         loss.backward()
+         optimizer.step()
+         optimizer.zero_grad()
+
+         batch_time.update(time.time() - end)
+         n_iter += 1
+
+         if n_iter % args.vis_freq == 0:
+             vis_img(args.vis_path, torch.clip(rec, 0, 1), args.arch)
+
+         ostr = 'Epoch: [{:03d}] [{:04d}/{:04d}], Iter: {:6d} '.format(epoch+1, ww, len(train_loader), n_iter-1)
+         ostr += 'Time: {}, Data: {} '.format(batch_time, data_time)
+         ostr += ' '.join(map('{:s} {:.4f} ({:.6f}) '.format, losses.names, losses.val, losses.avg))
+         ostr += 'batch_PSNR {} '.format(batch_psnr)
+         ostr += 'lr {:.6f}'.format(optimizer.state_dict()['param_groups'][0]['lr'])
+         if ww % args.print_freq == 0:
+             writer.write(ostr + '\n')
+         end = time.time()
+
+     return
+
+
+ def validation(args, test_loader_list, model, lpips_function_dict):
+     model.eval()
+
+     for eta, test_loader in zip(args.eta_list, test_loader_list):
+         cur_eval_root = osp.join(args.eval_path, args.arch, 'eta_{:.2f}'.format(eta))
+         os.makedirs(cur_eval_root, exist_ok=True)
+
+         global n_iter
+         batch_time = AverageMeter()
+         data_time = AverageMeter()
+         metrics_name = ['PSNR', 'SSIM', 'LPIPS-A', 'LPIPS-V', 'AvgTime']
+         metrics = AverageMeter(i=len(metrics_name), precision=4, names=metrics_name)
+
+         for ww, data in enumerate(test_loader, 0):
+             st1 = time.time()
+             spks = torch.cat([spk.float().cuda() for spk in data['spikes']], dim=1)
+             central_idx = 10*args.half_reserve + 30
+             spks = spks[:, central_idx-30:central_idx+31]
+
+             if not args.no_dsft:
+                 dsfts = torch.cat([d.float().cuda() for d in data['dsft']], dim=1)
+                 central_idx = 10*args.half_reserve + 30
+                 dsfts = dsfts[:, central_idx-30:central_idx+31]
+
+             images = data['images']
+             norm_fac = data['norm_fac'].unsqueeze_(dim=1).unsqueeze_(dim=1).unsqueeze_(dim=1).cuda().float()
+
+             data_time.update(time.time() - st1)
+
+             if not args.no_dsft:
+                 dsft_dict = convert_dsft4(spike=spks, dsft=dsfts)
+
+             input_dict = {
+                 'dsft_dict': dsft_dict,
+                 'spikes': spks,
+             }
+
+             with torch.no_grad():
+                 st = time.time()
+                 rec = model(input_dict=input_dict)
+                 mtime = time.time() - st
+
+             rec = rec / norm_fac
+             rec = torch.clip(rec, 0, 1)
+             rec_np = torch2numpy255(rec)
+             img_np = torch2numpy255(images[0])
+
+             if not args.no_imwrite:
+                 cur_vis_path = osp.join(cur_eval_root, '{:03d}.png'.format(ww))
+                 cv2.imwrite(cur_vis_path, rec_np.astype(np.uint8))
+
+             cur_psnr = calculate_psnr(rec_np, img_np)
+             cur_ssim = calculate_ssim(rec_np, img_np)
+             with torch.no_grad():
+                 cur_lpips_alex = lpips_function_dict['alex'](rec, images[0].cuda())
+                 cur_lpips_vgg = lpips_function_dict['vgg'](rec, images[0].cuda())
+
+             cur_metrics_list = [cur_psnr, cur_ssim, cur_lpips_alex.item(), cur_lpips_vgg.item(), mtime]
+             metrics.update(cur_metrics_list)
+
+             torch.cuda.empty_cache()
+         ostr = 'Eta {:.2f} ALL '.format(eta) + ' '.join(map('{:s} {:.4f}'.format, metrics.names, metrics.avg))
+         writer.write(ostr + '\n')
+
+     return
+
+
+ def main():
+     ##########################################################################################################
+     # Set random seeds
+     set_seeds(args.seed)
+
+     # Create save path and logs
+     timestamp1 = datetime.datetime.now().strftime('%m-%d')
+     timestamp2 = datetime.datetime.now().strftime('%H%M%S')
+
+     save_folder_name = 'a_{:s}_b{:d}_{:s}'.format(args.arch, args.batch_size, timestamp2)
+
+     save_path = osp.join(args.save_dir, timestamp1, save_folder_name)
+     make_dir(save_path)
+     ostr = '=>Save path: ' + save_path
+     writer.write(ostr + '\n')
+     # print('=>Save path: ', save_path)
+     train_writer = SummaryWriter(save_path)
+
+     make_dir(args.vis_path)
+     make_dir(args.eval_path)
+
+     model = None
+     optimizer = None
+
+     ##########################################################################################################
+     ## Create model
+     print(args.arch)
+     model = get_model(args)
+
+     if args.compile_model and (torch.__version__ >= '2.0.0'):
+         ostr = 'Start compile the model'
+         writer.write(ostr + '\n')
+         st = time.time()
+         torch.compile(model)
+         ostr = 'Finish compiling the model Time {:.2f}s'.format(time.time() - st)
+         writer.write(ostr + '\n')
+
+     if args.pretrained != None:
+         network_data = torch.load(args.pretrained)
+         ostr = '=> using pretrained model {:s}'.format(args.pretrained)
+         writer.write(ostr + '\n')
+         ostr = '=> model params: {:.6f}M'.format(model.num_parameters()/1e6)
+         writer.write(ostr + '\n')
+         model = torch.nn.DataParallel(model).cuda()
+         model = model.cuda()
+         model.load_state_dict(network_data)
+     else:
+         network_data = None
+         ostr = '=> train from scratch'
+         writer.write(ostr + '\n')
+         model.init_weights()
+         ostr = '=> model params: {:.6f}M'.format(model.num_parameters()/1e6)
+         writer.write(ostr + '\n')
+         model = torch.nn.DataParallel(model).cuda()
+         model = model.cuda()
+
+     cudnn.benchmark = True
+
+     ##########################################################################################################
+     ## Create Optimizer
+     assert(args.solver in ['Adam', 'SGD'])
+     ostr = '=> settings {:s} solver'.format(args.solver)
+     writer.write(ostr + '\n')
+     optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
+
+
+     ##########################################################################################################
+     ## Dataset
+     train_set = datasets.sreds_train(args)
+     train_loader = torch.utils.data.DataLoader(
+         train_set,
+         drop_last=False,
+         batch_size=args.batch_size,
+         shuffle=True,
+         num_workers=args.workers,
+     )
+
+     test_loader_list = []
+     for eta in args.eta_list:
+         test_set = datasets.sreds_test_small(args, eta=eta)
+         test_loader = torch.utils.data.DataLoader(
+             test_set,
+             drop_last=False,
+             batch_size=1,
+             shuffle=False,
+             num_workers=args.workers,
+         )
+         test_loader_list.append(test_loader)
+
+     ##########################################################################################################
+     ## For LPIPS
+     loss_fn_alex = lpips.LPIPS(net='alex').cuda() # closer to "traditional" perceptual loss, when used for optimization
+     loss_fn_vgg = lpips.LPIPS(net='vgg').cuda() # closer to "traditional" perceptual loss, when used for optimization
+     lpips_function_dict = {'alex': loss_fn_alex, 'vgg': loss_fn_vgg}
+
+     ##########################################################################################################
+     ## Train or Evaluate
+     if args.test_eval:
+         validation(
+             args=args,
+             test_loader_list=test_loader_list,
+             model=model,
+             lpips_function_dict=lpips_function_dict,
+         )
+         return
+
+     epoch = args.start_epoch
+     while(True):
+         train(
+             args=args,
+             train_loader=train_loader,
+             model=model,
+             optimizer=optimizer,
+             epoch=epoch,
+             train_writer=train_writer,
+         )
+         epoch += 1
+
+         # scheduler can be added here
+         if epoch in args.milestones:
+             for param_group in optimizer.param_groups:
+                 param_group['lr'] = param_group['lr'] * args.lr_scale_factor
+
+         # save model
+         if epoch % 5 == 0:
+             model_save_name = '{:s}_epoch{:03d}.pth'.format(args.arch, epoch)
+             torch.save(model.state_dict(), osp.join(save_path, model_save_name))
+
+         # if epoch % 5 == 0:
+         if epoch % args.eval_interval == 0:
+             validation(
+                 args=args,
+                 test_loader_list=test_loader_list,
+                 model=model,
+                 lpips_function_dict=lpips_function_dict,
+             )
+
+
+         if epoch >= args.epochs:
+             break
+
+
+ if __name__ == '__main__':
+     main()
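
The two options accepted by --loss_type above differ only in the penalty applied to the reconstruction residual: plain L1 versus a Charbonnier (smoothed L1) term. A minimal sketch of both, on toy tensors rather than the BSF model output:

    import torch

    rec = torch.rand(1, 1, 8, 8)   # stand-in for the reconstructed frame, range [0, 1]
    gt = torch.rand(1, 1, 8, 8)    # stand-in for the ground-truth frame
    l1_loss = (rec - gt).abs().mean()                        # --loss_type l1
    charbonnier = torch.sqrt((rec - gt) ** 2 + 1e-6).mean()  # --loss_type charbonnier
    print(l1_loss.item(), charbonnier.item())  # Charbonnier tracks L1 but stays smooth near zero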

spikezoo/archs/bsf/metrics/psnr.py
@@ -0,0 +1,22 @@
+ import math
+ import numpy as np
+
+ # --------------------------------------------
+ # PSNR
+ # --------------------------------------------
+ def calculate_psnr(img1, img2, border=0):
+     # img1 and img2 have range [0, 255]
+     #img1 = img1.squeeze()
+     #img2 = img2.squeeze()
+     if not img1.shape == img2.shape:
+         raise ValueError('Input images must have the same dimensions.')
+     h, w = img1.shape[:2]
+     img1 = img1[border:h-border, border:w-border]
+     img2 = img2[border:h-border, border:w-border]
+
+     img1 = img1.astype(np.float64)
+     img2 = img2.astype(np.float64)
+     mse = np.mean((img1 - img2)**2)
+     if mse == 0:
+         return float('inf')
+     return 20 * math.log10(255.0 / math.sqrt(mse))
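
calculate_psnr above expects same-shape arrays in the [0, 255] range and returns 20*log10(255/sqrt(MSE)), with inf for identical inputs. A small usage sketch, assuming the function is importable from this new metrics/psnr.py:

    import numpy as np

    img = np.random.randint(0, 256, (64, 64)).astype(np.float64)
    noisy = np.clip(img + np.random.normal(0, 5, img.shape), 0, 255)
    print(calculate_psnr(img, img))    # inf (zero MSE)
    print(calculate_psnr(img, noisy))  # roughly 34 dB for sigma = 5 noise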

spikezoo/archs/bsf/metrics/ssim.py
@@ -0,0 +1,54 @@
+ import numpy as np
+ import cv2
+
+ # --------------------------------------------
+ # SSIM
+ # --------------------------------------------
+ def calculate_ssim(img1, img2, border=0):
+     '''calculate SSIM
+     the same outputs as MATLAB's
+     img1, img2: [0, 255]
+     '''
+     #img1 = img1.squeeze()
+     #img2 = img2.squeeze()
+     if not img1.shape == img2.shape:
+         raise ValueError('Input images must have the same dimensions.')
+     h, w = img1.shape[:2]
+     img1 = img1[border:h-border, border:w-border]
+     img2 = img2[border:h-border, border:w-border]
+
+     if img1.ndim == 2:
+         return ssim(img1, img2)
+     elif img1.ndim == 3:
+         if img1.shape[2] == 3:
+             ssims = []
+             for i in range(3):
+                 ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
+             return np.array(ssims).mean()
+         elif img1.shape[2] == 1:
+             return ssim(np.squeeze(img1), np.squeeze(img2))
+     else:
+         raise ValueError('Wrong input image dimensions.')
+
+
+ def ssim(img1, img2):
+     C1 = (0.01 * 255)**2
+     C2 = (0.03 * 255)**2
+
+     img1 = img1.astype(np.float64)
+     img2 = img2.astype(np.float64)
+     kernel = cv2.getGaussianKernel(11, 1.5)
+     window = np.outer(kernel, kernel.transpose())
+
+     mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
+     mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
+     mu1_sq = mu1**2
+     mu2_sq = mu2**2
+     mu1_mu2 = mu1 * mu2
+     sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
+     sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
+     sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
+
+     ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
+                                                             (sigma1_sq + sigma2_sq + C2))
+     return ssim_map.mean()
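
calculate_ssim dispatches on the input shape: 2-D arrays are scored directly by ssim(), (H, W, 3) arrays are scored per channel and averaged, and (H, W, 1) arrays are squeezed first. A brief sketch, again assuming the functions above are importable:

    import numpy as np

    a = np.random.randint(0, 256, (64, 64, 3)).astype(np.float64)
    b = np.clip(a + np.random.normal(0, 10, a.shape), 0, 255)
    print(calculate_ssim(a, a))  # 1.0 for identical images
    print(calculate_ssim(a, b))  # < 1.0, mean SSIM over the three channels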

spikezoo/archs/bsf/models/bsf/align.py
@@ -0,0 +1,154 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torchvision.ops import DeformConv2d
+
+
+ class CAPA(nn.Module):
+     def __init__(self, chnn, sc=11):
+         super().__init__()
+         self.sc = sc
+         self.unfold = nn.Unfold(kernel_size=3*self.sc, dilation=1, padding=self.sc, stride=self.sc)
+         self.scale = chnn ** -0.5
+         self.to_q = nn.Conv2d(chnn, chnn, 1, bias=False)
+         self.to_k = nn.Conv2d(chnn, chnn, 1, bias=False)
+         self.to_v = nn.Conv2d(chnn, chnn, 1, bias=False)
+         self.gamma = nn.Parameter(torch.zeros(1))
+         self.mask_k = True
+
+     def forward(self, x_key, x_ref):
+         b, c, h_in, w_in = x_key.shape
+         x_pad = self.sc - w_in % self.sc
+         y_pad = self.sc - h_in % self.sc
+         feat_key = F.pad(x_key, (0, x_pad, 0, y_pad))
+         feat_ref = F.pad(x_ref, (0, x_pad, 0, y_pad))
+         b, c, h, w = feat_key.shape
+         h_sc = h // self.sc
+         w_sc = w // self.sc
+
+         fm = torch.ones(1, 1, h_in, w_in).to(feat_key.device)
+         fm = F.pad(fm, (0, x_pad, 0, y_pad))
+         fm_k = self.unfold(fm).view(1, 1, -1, h_sc*w_sc)
+         fm_q = fm.view(1, 1, h_sc, self.sc, w_sc, self.sc).permute(0, 1, 2, 4, 3, 5).contiguous().view(1, 1, h_sc*w_sc, self.sc**2)
+         am = torch.einsum('b c k n, b c n s -> b k n s', fm_k, fm_q)
+         am = (am - 1) * 99.
+         am = am.repeat(b, 1, 1, 1)
+
+         feat_q = self.to_q(feat_key)
+         feat_k = self.to_k(feat_ref)
+         feat_k = self.unfold(feat_k).view(b, c, -1, h_sc*w_sc)
+         feat_k = self.scale * feat_k
+         feat_q = feat_q.view(b, c, h_sc, self.sc, w_sc, self.sc).permute(0, 1, 2, 4, 3, 5).contiguous().view(b, c, h_sc*w_sc, self.sc**2)
+         attn = torch.einsum('b c k n, b c n s -> b k n s', feat_k, feat_q)
+
+
+         attn = attn + am
+         self.attn = F.softmax(attn, dim=1)
+
+         feat_v = self.to_v(feat_ref)
+         feat_v = self.unfold(feat_v).view(b, c, -1, h_sc*w_sc)
+         feat_r = torch.einsum('b k n s, b c k n -> b c n s', self.attn, feat_v)
+         feat_r = feat_r.view(b, c, h_sc, w_sc, self.sc, self.sc).permute(0, 1, 2, 4, 3, 5).contiguous().view(b, c, h, w)
+         feat_r = feat_r[:,:,:h_in,:w_in]
+         feat_o = x_ref + feat_r * self.gamma
+         return feat_o
+
+
+ class Multi_Granularity_Align_One_Level(nn.Module):
+     def __init__(self, base_dim=64, offset_groups=4, act=nn.ReLU(), memory=True):
+         super().__init__()
+         self.offset_groups = offset_groups
+         self.memory = memory
+
+         if self.memory:
+             first_output_dim = base_dim
+         else:
+             first_output_dim = 3*self.offset_groups*3*3
+
+         self.offset_conv_1 = self._make_two_conv_layer(input_dim=base_dim*2, hidden_dim=base_dim, output_dim=first_output_dim, kernel_size=3, stride=1, padding=1, act=act)
+         if self.memory:
+             self.offset_conv2_1 = self._make_two_conv_layer(input_dim=base_dim + 3*self.offset_groups*3*3, hidden_dim=base_dim, output_dim=3*self.offset_groups*3*3, kernel_size=3, stride=1, padding=1, act=act)
+             self.fuse_feat = self._make_two_conv_layer(input_dim=base_dim*2, hidden_dim=base_dim, output_dim=base_dim, kernel_size=3, stride=1, padding=1, act=act)
+
+     def _make_two_conv_layer(self, input_dim, hidden_dim, output_dim, kernel_size, stride, padding, act):
+         layer = nn.Sequential(
+             nn.Conv2d(input_dim, hidden_dim, kernel_size=kernel_size, stride=stride, padding=padding),
+             act,
+             nn.Conv2d(hidden_dim, output_dim, kernel_size=kernel_size, stride=1, padding=padding),
+         )
+         return layer
+
+     def forward(self, feat_list, kpa, deform_conv, prev_offset_feat=None, prev_feat=None):
+         xa, xb, xc, xd, xe = feat_list
+
+         xa_kpa, xb_kpa, xd_kpa, xe_kpa = [kpa(x_key=xc, x_ref=xxx) for xxx in [xa, xb, xd, xe]]
+         feat_for_conv_offset1 = [torch.cat([xxx, xc], dim=1) for xxx in [xa_kpa, xb_kpa, xd_kpa, xe_kpa]]
+
+         offset_feat_list1 = [self.offset_conv_1(f) for f in feat_for_conv_offset1]
+         if self.memory:
+             prev_offset_upsample_list1 = [F.interpolate(offset_feat, scale_factor=2, mode='bilinear') for offset_feat in prev_offset_feat]
+             offset_feat_list1 = [self.offset_conv2_1(torch.cat((f1, f2), dim=1)) for f1, f2 in zip(offset_feat_list1, prev_offset_upsample_list1)]
+
+         o1o2m_abde_list1 = [f.chunk(3, dim=1) for f in offset_feat_list1]
+         offset_abde_list1 = [torch.cat((o1o2m[0], o1o2m[1]), dim=1) for o1o2m in o1o2m_abde_list1]
+         mask_abde_list1 = [torch.sigmoid(o1o2m[2]) for o1o2m in o1o2m_abde_list1]
+
+         x_align_abde = [deform_conv(input=xx, offset=offset, mask=mask) for xx, offset, mask in zip([xa_kpa, xb_kpa, xd_kpa, xe_kpa], offset_abde_list1, mask_abde_list1)]
+
+         if self.memory:
+             prev_x_abde_align_upasmple_list = [F.interpolate(xxx, scale_factor=2, mode='bilinear') for xxx in prev_feat]
+             x_align_abde = [self.fuse_feat(torch.cat((x_align, prev_x_align_upsample), dim=1)) for x_align, prev_x_align_upsample in zip(x_align_abde, prev_x_abde_align_upasmple_list)]
+
+         xa_align, xb_align, xd_align, xe_align = x_align_abde
+         x_align = (xa_align, xb_align, xc, xd_align, xe_align)
+         return x_align, offset_feat_list1
+
+
+ class Multi_Granularity_Align(nn.Module):
+     def __init__(self, base_dim=64, groups=4, act=nn.ReLU(), sc=11):
+         super().__init__()
+         self.offset_groups = groups
+         self.sc = sc
+
+         self.kpa = CAPA(chnn=base_dim, sc=self.sc)
+         self.deform_conv = DeformConv2d(in_channels=base_dim, out_channels=base_dim, kernel_size=3, stride=1, padding=1, groups=1)
+
+         ## Downsample
+         self.conv_ds_L2 = self._make_two_conv_layer(input_dim=base_dim, hidden_dim=base_dim, output_dim=base_dim, kernel_size=3, stride=2, padding=1, act=act)
+         self.conv_ds_L3 = self._make_two_conv_layer(input_dim=base_dim, hidden_dim=base_dim, output_dim=base_dim, kernel_size=3, stride=2, padding=1, act=act)
+
+         self.align_L3 = Multi_Granularity_Align_One_Level(base_dim=base_dim, offset_groups=self.offset_groups, act=act, memory=False)
+         self.align_L2 = Multi_Granularity_Align_One_Level(base_dim=base_dim, offset_groups=self.offset_groups, act=act, memory=True)
+         self.align_L1 = Multi_Granularity_Align_One_Level(base_dim=base_dim, offset_groups=self.offset_groups, act=act, memory=True)
+
+     def _make_two_conv_layer(self, input_dim, hidden_dim, output_dim, kernel_size, stride, padding, act):
+         layer = nn.Sequential(
+             nn.Conv2d(input_dim, hidden_dim, kernel_size=kernel_size, stride=stride, padding=padding),
+             act,
+             nn.Conv2d(hidden_dim, output_dim, kernel_size=kernel_size, stride=1, padding=padding),
+         )
+         return layer
+
+     def forward(self, feat_list):
+         '''
+         feat_list: xa, xb, xc, xd, xe
+         '''
+
+         xa, xb, xc, xd, xe = feat_list
+         ## Downsample
+         feat_batch_cat_L1 = torch.cat(feat_list, dim=0)
+         feat_batch_cat_L2 = self.conv_ds_L2(feat_batch_cat_L1)
+         feat_batch_cat_L3 = self.conv_ds_L3(feat_batch_cat_L2)
+
+         L3_align_feat_list, L3_offset_feat_list = self.align_L3(feat_list=feat_batch_cat_L3.chunk(5, dim=0), kpa=self.kpa, deform_conv=self.deform_conv,
+                                                                 prev_offset_feat=None, prev_feat=None)
+
+         L2_align_feat_list, L2_offset_feat_list = self.align_L2(feat_list=feat_batch_cat_L2.chunk(5, dim=0), kpa=self.kpa, deform_conv=self.deform_conv,
+                                                                 prev_offset_feat=L3_offset_feat_list, prev_feat=L3_align_feat_list)
+
+         L1_align_feat_list, L1_offset_feat_list = self.align_L1(feat_list=feat_batch_cat_L1.chunk(5, dim=0), kpa=self.kpa, deform_conv=self.deform_conv,
+                                                                 prev_offset_feat=L2_offset_feat_list, prev_feat=L2_align_feat_list)
+
+         return L1_align_feat_list
+
+
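
Multi_Granularity_Align takes five same-shape feature maps (the key frame xc plus four reference frames), builds a three-level pyramid with the two stride-2 blocks, and refines offsets coarse to fine through the shared CAPA attention and DeformConv2d. A rough shape check, assuming the classes above are importable and a spatial size divisible by 4 so the upsampled offsets line up across levels:

    import torch

    aligner = Multi_Granularity_Align(base_dim=64, groups=4, sc=11)
    feats = [torch.randn(2, 64, 64, 64) for _ in range(5)]  # xa, xb, xc, xd, xe
    aligned = aligner(feats)           # tuple of five aligned feature maps
    print([f.shape for f in aligned])  # each torch.Size([2, 64, 64, 64]); xc passes through unchanged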