learning3d 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115) hide show
  1. learning3d/__init__.py +2 -0
  2. learning3d/data_utils/__init__.py +4 -0
  3. learning3d/data_utils/dataloaders.py +454 -0
  4. learning3d/data_utils/user_data.py +119 -0
  5. learning3d/examples/test_dcp.py +139 -0
  6. learning3d/examples/test_deepgmr.py +144 -0
  7. learning3d/examples/test_flownet.py +113 -0
  8. learning3d/examples/test_masknet.py +159 -0
  9. learning3d/examples/test_masknet2.py +162 -0
  10. learning3d/examples/test_pcn.py +118 -0
  11. learning3d/examples/test_pcrnet.py +120 -0
  12. learning3d/examples/test_pnlk.py +121 -0
  13. learning3d/examples/test_pointconv.py +126 -0
  14. learning3d/examples/test_pointnet.py +121 -0
  15. learning3d/examples/test_prnet.py +126 -0
  16. learning3d/examples/test_rpmnet.py +120 -0
  17. learning3d/examples/train_PointNetLK.py +240 -0
  18. learning3d/examples/train_dcp.py +249 -0
  19. learning3d/examples/train_deepgmr.py +244 -0
  20. learning3d/examples/train_flownet.py +259 -0
  21. learning3d/examples/train_masknet.py +239 -0
  22. learning3d/examples/train_pcn.py +216 -0
  23. learning3d/examples/train_pcrnet.py +228 -0
  24. learning3d/examples/train_pointconv.py +245 -0
  25. learning3d/examples/train_pointnet.py +244 -0
  26. learning3d/examples/train_prnet.py +229 -0
  27. learning3d/examples/train_rpmnet.py +228 -0
  28. learning3d/losses/__init__.py +12 -0
  29. learning3d/losses/chamfer_distance.py +51 -0
  30. learning3d/losses/classification.py +14 -0
  31. learning3d/losses/correspondence_loss.py +10 -0
  32. learning3d/losses/cuda/chamfer_distance/__init__.py +1 -0
  33. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +185 -0
  34. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +209 -0
  35. learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +66 -0
  36. learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +41 -0
  37. learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +347 -0
  38. learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +18 -0
  39. learning3d/losses/cuda/emd_torch/pkg/include/emd.h +54 -0
  40. learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +1 -0
  41. learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +40 -0
  42. learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +70 -0
  43. learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +1 -0
  44. learning3d/losses/cuda/emd_torch/setup.py +29 -0
  45. learning3d/losses/emd.py +16 -0
  46. learning3d/losses/frobenius_norm.py +21 -0
  47. learning3d/losses/rmse_features.py +16 -0
  48. learning3d/models/__init__.py +23 -0
  49. learning3d/models/classifier.py +41 -0
  50. learning3d/models/dcp.py +92 -0
  51. learning3d/models/deepgmr.py +165 -0
  52. learning3d/models/dgcnn.py +92 -0
  53. learning3d/models/flownet3d.py +446 -0
  54. learning3d/models/masknet.py +84 -0
  55. learning3d/models/masknet2.py +264 -0
  56. learning3d/models/pcn.py +164 -0
  57. learning3d/models/pcrnet.py +74 -0
  58. learning3d/models/pointconv.py +108 -0
  59. learning3d/models/pointnet.py +108 -0
  60. learning3d/models/pointnetlk.py +173 -0
  61. learning3d/models/pooling.py +15 -0
  62. learning3d/models/ppfnet.py +102 -0
  63. learning3d/models/prnet.py +431 -0
  64. learning3d/models/rpmnet.py +359 -0
  65. learning3d/models/segmentation.py +38 -0
  66. learning3d/ops/__init__.py +0 -0
  67. learning3d/ops/data_utils.py +45 -0
  68. learning3d/ops/invmat.py +134 -0
  69. learning3d/ops/quaternion.py +218 -0
  70. learning3d/ops/se3.py +157 -0
  71. learning3d/ops/sinc.py +229 -0
  72. learning3d/ops/so3.py +213 -0
  73. learning3d/ops/transform_functions.py +342 -0
  74. learning3d/utils/__init__.py +9 -0
  75. learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so +0 -0
  76. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o +0 -0
  77. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o +0 -0
  78. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o +0 -0
  79. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o +0 -0
  80. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o +0 -0
  81. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o +0 -0
  82. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o +0 -0
  83. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o +0 -0
  84. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o +0 -0
  85. learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg +0 -0
  86. learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt +14 -0
  87. learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt +1 -0
  88. learning3d/utils/lib/pointnet2.egg-info/top_level.txt +1 -0
  89. learning3d/utils/lib/pointnet2_modules.py +160 -0
  90. learning3d/utils/lib/pointnet2_utils.py +318 -0
  91. learning3d/utils/lib/pytorch_utils.py +236 -0
  92. learning3d/utils/lib/setup.py +23 -0
  93. learning3d/utils/lib/src/ball_query.cpp +25 -0
  94. learning3d/utils/lib/src/ball_query_gpu.cu +67 -0
  95. learning3d/utils/lib/src/ball_query_gpu.h +15 -0
  96. learning3d/utils/lib/src/cuda_utils.h +15 -0
  97. learning3d/utils/lib/src/group_points.cpp +36 -0
  98. learning3d/utils/lib/src/group_points_gpu.cu +86 -0
  99. learning3d/utils/lib/src/group_points_gpu.h +22 -0
  100. learning3d/utils/lib/src/interpolate.cpp +65 -0
  101. learning3d/utils/lib/src/interpolate_gpu.cu +233 -0
  102. learning3d/utils/lib/src/interpolate_gpu.h +36 -0
  103. learning3d/utils/lib/src/pointnet2_api.cpp +25 -0
  104. learning3d/utils/lib/src/sampling.cpp +46 -0
  105. learning3d/utils/lib/src/sampling_gpu.cu +253 -0
  106. learning3d/utils/lib/src/sampling_gpu.h +29 -0
  107. learning3d/utils/pointconv_util.py +382 -0
  108. learning3d/utils/ppfnet_util.py +244 -0
  109. learning3d/utils/svd.py +59 -0
  110. learning3d/utils/transformer.py +243 -0
  111. learning3d-0.0.1.dist-info/LICENSE +21 -0
  112. learning3d-0.0.1.dist-info/METADATA +271 -0
  113. learning3d-0.0.1.dist-info/RECORD +115 -0
  114. learning3d-0.0.1.dist-info/WHEEL +5 -0
  115. learning3d-0.0.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,216 @@
1
+ # author: Vinit Sarode (vinitsarode5@gmail.com) 03/23/2020
2
+
3
+ import argparse
4
+ import os
5
+ import sys
6
+ import logging
7
+ import numpy
8
+ import numpy as np
9
+ import torch
10
+ import torch.utils.data
11
+ import torchvision
12
+ from torch.utils.data import DataLoader
13
+ from tensorboardX import SummaryWriter
14
+ from tqdm import tqdm
15
+
16
+ # Only if the files are in example folder.
17
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
18
+ if BASE_DIR[-8:] == 'examples':
19
+ sys.path.append(os.path.join(BASE_DIR, os.pardir))
20
+ os.chdir(os.path.join(BASE_DIR, os.pardir))
21
+
22
+ from learning3d.models import PCN
23
+ from learning3d.losses import ChamferDistanceLoss
24
+ from learning3d.data_utils import ClassificationData, ModelNet40Data
25
+
26
def _init_(args):
    """Create the checkpoint directory tree and back up the training script.

    Args:
        args: parsed options; only ``args.exp_name`` is used.
    """
    import shutil

    # exist_ok collapses the original three separate existence checks into one
    # call that creates the whole nested path at once.
    os.makedirs(os.path.join('checkpoints', args.exp_name, 'models'), exist_ok=True)

    # Best-effort backup of the entry script. The original used
    # os.system('cp ...'), which is Unix-only and fails silently; shutil is
    # portable. The isfile guard preserves the original "silently skip when
    # not run from the repo root" behavior.
    if os.path.isfile('train_pcn.py'):
        shutil.copyfile('train_pcn.py',
                        os.path.join('checkpoints', args.exp_name, 'main.py.backup'))
34
+
35
+
36
class IOStream:
    """Tiny logging helper that mirrors every message to stdout and a file."""

    def __init__(self, path):
        # Append mode so repeated runs extend the same log file.
        self.f = open(path, 'a')

    def cprint(self, text):
        """Print *text* to the console and persist it to the log immediately."""
        print(text)
        self.f.write(text + '\n')
        # Flush right away so the log survives a crash mid-epoch.
        self.f.flush()

    def close(self):
        """Release the underlying file handle."""
        self.f.close()
47
+
48
def test_one_epoch(device, model, test_loader):
    """Evaluate PCN over one pass of *test_loader*.

    Args:
        device: torch device the batches are moved to.
        model: PCN model; its output dict must contain 'coarse_output'.
        test_loader: iterable yielding (points, label) batches.

    Returns:
        Mean Chamfer distance (float) averaged over batches.
    """
    model.eval()
    test_loss = 0.0
    count = 0
    # Evaluation only: disabling autograd avoids building graphs, cutting
    # memory use and time (the original computed gradients needlessly).
    with torch.no_grad():
        for i, data in enumerate(tqdm(test_loader)):
            points, _ = data
            points = points.to(device)

            output = model(points)
            loss_val = ChamferDistanceLoss()(points, output['coarse_output'])

            test_loss += loss_val.item()
            count += 1

    test_loss = float(test_loss) / count
    return test_loss
65
+
66
def test(args, model, test_loader, textio):
    """Run a single evaluation pass and log the resulting validation loss."""
    loss = test_one_epoch(args.device, model, test_loader)
    textio.cprint('Validation Loss: %f' % (loss))
69
+
70
def train_one_epoch(device, model, train_loader, optimizer):
    """Optimize PCN for one pass over *train_loader*; returns the mean Chamfer loss."""
    model.train()
    running_loss = 0.0
    num_batches = 0
    for batch in tqdm(train_loader):
        points, _ = batch
        points = points.to(device)

        prediction = model(points)
        loss = ChamferDistanceLoss()(points, prediction['coarse_output'])

        # Standard optimization step: clear grads, backprop, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        num_batches += 1

    return float(running_loss) / num_batches
92
+
93
def train(args, model, train_loader, test_loader, boardio, textio, checkpoint):
    """Main training loop: optimize, evaluate, log, and checkpoint each epoch.

    Args:
        args: parsed options (device, optimizer, start_epoch, epochs, exp_name).
        model: network to optimize.
        train_loader / test_loader: DataLoader instances.
        boardio: tensorboardX SummaryWriter for scalar logging.
        textio: IOStream logger.
        checkpoint: optional resume dict with 'min_loss' and 'optimizer' state.

    Fixes vs. the original:
    - checkpoint['min_loss'] was loaded into a local but never used, so a
      resumed run reset best_test_loss to inf and could overwrite the best
      checkpoint with a worse model; it is now restored.
    - 'snap' is guarded: previously it was unbound (NameError) if the first
      epoch never improved on best_test_loss (e.g. a NaN test loss).
    - 'Traininig' typo in the log line corrected.
    """
    learnable_params = filter(lambda p: p.requires_grad, model.parameters())
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(learnable_params)
    else:
        optimizer = torch.optim.SGD(learnable_params, lr=0.1)

    best_test_loss = np.inf
    if checkpoint is not None:
        best_test_loss = checkpoint['min_loss']
        optimizer.load_state_dict(checkpoint['optimizer'])

    snap = None
    for epoch in range(args.start_epoch, args.epochs):
        train_loss = train_one_epoch(args.device, model, train_loader, optimizer)
        test_loss = test_one_epoch(args.device, model, test_loader)

        if test_loss < best_test_loss:
            best_test_loss = test_loss
            snap = {'epoch': epoch + 1,
                    'model': model.state_dict(),
                    'min_loss': best_test_loss,
                    'optimizer': optimizer.state_dict()}
            torch.save(snap, 'checkpoints/%s/models/best_model_snap.t7' % (args.exp_name))
            torch.save(model.state_dict(), 'checkpoints/%s/models/best_model.t7' % (args.exp_name))

        # Latest-state snapshot saved every epoch (snap holds the best-so-far).
        if snap is not None:
            torch.save(snap, 'checkpoints/%s/models/model_snap.t7' % (args.exp_name))
        torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % (args.exp_name))

        boardio.add_scalar('Train Loss', train_loss, epoch + 1)
        boardio.add_scalar('Test Loss', test_loss, epoch + 1)
        boardio.add_scalar('Best Test Loss', best_test_loss, epoch + 1)

        textio.cprint('EPOCH:: %d, Training Loss: %f, Testing Loss: %f, Best Loss: %f' % (epoch + 1, train_loss, test_loss, best_test_loss))
127
+
128
def _str2bool(value):
    """argparse type for booleans.

    The original used ``type=bool``, which treats ANY non-empty string as
    True — so ``--eval False`` silently enabled evaluation. Parse the text
    explicitly instead; plain ``--eval True`` keeps working.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)


def options():
    """Parse command-line options for training PCN.

    Returns:
        argparse.Namespace with all training settings.
    """
    parser = argparse.ArgumentParser(description='Point Completion Network')
    parser.add_argument('--exp_name', type=str, default='exp_pcn', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--dataset_path', type=str, default='ModelNet40',
                        metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
    parser.add_argument('--eval', type=_str2bool, default=False, help='Train or Evaluate the network.')

    # settings for input data
    parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
                        metavar='DATASET', help='dataset type (default: modelnet)')
    parser.add_argument('--num_points', default=1024, type=int,
                        metavar='N', help='points in point-cloud (default: 1024)')

    # settings for PCN
    parser.add_argument('--emb_dims', default=1024, type=int,
                        metavar='K', help='dim. of the feature vector (default: 1024)')
    parser.add_argument('--detailed_output', default=False, type=_str2bool,
                        help='Coarse + Fine Output')

    # settings for on training
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('-j', '--workers', default=4, type=int,
                        metavar='N', help='number of data loading workers (default: 4)')
    parser.add_argument('-b', '--batch_size', default=32, type=int,
                        metavar='N', help='mini-batch size (default: 32)')
    parser.add_argument('--epochs', default=200, type=int,
                        metavar='N', help='number of total epochs to run')
    parser.add_argument('--start_epoch', default=0, type=int,
                        metavar='N', help='manual epoch number (useful on restarts)')
    parser.add_argument('--optimizer', default='Adam', choices=['Adam', 'SGD'],
                        metavar='METHOD', help='name of an optimizer (default: Adam)')
    parser.add_argument('--resume', default='', type=str,
                        metavar='PATH', help='path to latest checkpoint (default: null (no-use))')
    parser.add_argument('--pretrained', default='', type=str,
                        metavar='PATH', help='path to pretrained model file (default: null (no-use))')
    parser.add_argument('--device', default='cuda:0', type=str,
                        metavar='DEVICE', help='use CUDA if available')

    args = parser.parse_args()
    return args
169
+
170
def main():
    """Entry point: parse options, seed RNGs, build data/model, then train or eval."""
    args = options()
    # NOTE(review): this unconditionally overrides any --dataset_path given on
    # the command line; kept as-is since the surrounding scripts rely on it.
    args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')

    # Seed everything for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
    _init_(args)

    textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
    textio.cprint(str(args))

    # Data pipeline: ModelNet40 wrapped for classification-style batches.
    trainset = ClassificationData(ModelNet40Data(train=True))
    testset = ClassificationData(ModelNet40Data(train=False))
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
    test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)

    if not torch.cuda.is_available():
        args.device = 'cpu'
    args.device = torch.device(args.device)

    # Create PCN model.
    model = PCN(emb_dims=args.emb_dims, detailed_output=args.detailed_output)

    checkpoint = None
    if args.resume:
        assert os.path.isfile(args.resume)
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])

    if args.pretrained:
        assert os.path.isfile(args.pretrained)
        model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
    model.to(args.device)

    if args.eval:
        test(args, model, test_loader, textio)
    else:
        train(args, model, train_loader, test_loader, boardio, textio, checkpoint)

if __name__ == '__main__':
    main()
@@ -0,0 +1,228 @@
1
+ import argparse
2
+ import os
3
+ import sys
4
+ import logging
5
+ import numpy
6
+ import numpy as np
7
+ import torch
8
+ import torch.utils.data
9
+ import torchvision
10
+ from torch.utils.data import DataLoader
11
+ from tensorboardX import SummaryWriter
12
+ from tqdm import tqdm
13
+
14
+ # Only if the files are in example folder.
15
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
16
+ if BASE_DIR[-8:] == 'examples':
17
+ sys.path.append(os.path.join(BASE_DIR, os.pardir))
18
+ os.chdir(os.path.join(BASE_DIR, os.pardir))
19
+
20
+ from learning3d.models import PointNet
21
+ from learning3d.models import iPCRNet
22
+ from learning3d.losses import ChamferDistanceLoss
23
+ from learning3d.data_utils import RegistrationData, ModelNet40Data
24
+
25
def _init_(args):
    """Create the checkpoint directory tree and back up the entry scripts.

    Args:
        args: parsed options; only ``args.exp_name`` is used.
    """
    import shutil

    # exist_ok collapses the original three separate existence checks.
    os.makedirs(os.path.join('checkpoints', args.exp_name, 'models'), exist_ok=True)

    # Best-effort backups; 'main.py'/'model.py' only exist in some layouts, so
    # skip silently when missing (matches the old silent os.system('cp ...')
    # behavior, but portable beyond Unix).
    for src, dst in (('main.py', 'main.py.backup'), ('model.py', 'model.py.backup')):
        if os.path.isfile(src):
            shutil.copyfile(src, os.path.join('checkpoints', args.exp_name, dst))
34
+
35
+
36
class IOStream:
    """Console-plus-file logger used by the training scripts."""

    def __init__(self, path):
        # Append so restarts keep extending the same run.log.
        self.f = open(path, 'a')

    def cprint(self, text):
        """Echo *text* to stdout and append it (with newline) to the log file."""
        print(text)
        self.f.write(text + '\n')
        self.f.flush()  # keep the on-disk log current even if we crash

    def close(self):
        """Close the log file handle."""
        self.f.close()
47
+
48
def test_one_epoch(device, model, test_loader):
    """Evaluate iPCRNet over one pass of *test_loader*.

    Args:
        device: torch device the batches are moved to.
        model: registration model; its output dict must contain
            'transformed_source'.
        test_loader: iterable yielding (template, source, igt) batches.

    Returns:
        Mean Chamfer distance (float) between template and aligned source.
    """
    model.eval()
    test_loss = 0.0
    count = 0
    # Inference only: torch.no_grad() skips autograd bookkeeping (the
    # original built gradient graphs during evaluation for no benefit).
    with torch.no_grad():
        for i, data in enumerate(tqdm(test_loader)):
            template, source, igt = data

            template = template.to(device)
            source = source.to(device)
            # igt is the ground-truth transform; moved for interface parity
            # but unused by the Chamfer metric below.
            igt = igt.to(device)

            output = model(template, source)
            loss_val = ChamferDistanceLoss()(template, output['transformed_source'])

            test_loss += loss_val.item()
            count += 1

    test_loss = float(test_loss) / count
    return test_loss
68
+
69
def test(args, model, test_loader, textio):
    """Run a single evaluation pass and log the validation loss.

    Bug fix: test_one_epoch returns a single float, but this function
    previously unpacked two values ('test_loss, test_accuracy = ...'),
    so any run with --eval crashed with a TypeError. The log message is
    trimmed to the value that actually exists.
    """
    test_loss = test_one_epoch(args.device, model, test_loader)
    textio.cprint('Validation Loss: %f' % (test_loss))
72
+
73
def train_one_epoch(device, model, train_loader, optimizer):
    """One optimization pass over *train_loader*; returns the mean Chamfer loss."""
    model.train()
    running_loss = 0.0
    num_batches = 0
    for batch in tqdm(train_loader):
        template, source, igt = batch

        template = template.to(device)
        source = source.to(device)
        igt = igt.to(device)

        result = model(template, source)
        loss = ChamferDistanceLoss()(template, result['transformed_source'])

        # forward + backward + optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        num_batches += 1

    return float(running_loss) / num_batches
99
+
100
def train(args, model, train_loader, test_loader, boardio, textio, checkpoint):
    """Training loop for iPCRNet: optimize, evaluate, log, checkpoint per epoch.

    Args:
        args: parsed options (device, optimizer, start_epoch, epochs, exp_name).
        model: iPCRNet; model.feature_model (the PointNet) is saved separately.
        train_loader / test_loader: DataLoader instances.
        boardio: tensorboardX SummaryWriter.
        textio: IOStream logger.
        checkpoint: optional resume dict with 'min_loss' and 'optimizer' state.

    Fixes vs. the original:
    - checkpoint['min_loss'] is now restored into best_test_loss, so a resumed
      run cannot overwrite the best checkpoint with a worse model;
    - 'snap' is guarded against being unbound when no epoch has improved yet;
    - 'Traininig' typo in the log line corrected.
    """
    learnable_params = filter(lambda p: p.requires_grad, model.parameters())
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(learnable_params)
    else:
        optimizer = torch.optim.SGD(learnable_params, lr=0.1)

    best_test_loss = np.inf
    if checkpoint is not None:
        best_test_loss = checkpoint['min_loss']
        optimizer.load_state_dict(checkpoint['optimizer'])

    snap = None
    for epoch in range(args.start_epoch, args.epochs):
        train_loss = train_one_epoch(args.device, model, train_loader, optimizer)
        test_loss = test_one_epoch(args.device, model, test_loader)

        if test_loss < best_test_loss:
            best_test_loss = test_loss
            snap = {'epoch': epoch + 1,
                    'model': model.state_dict(),
                    'min_loss': best_test_loss,
                    'optimizer': optimizer.state_dict()}
            torch.save(snap, 'checkpoints/%s/models/best_model_snap.t7' % (args.exp_name))
            torch.save(model.state_dict(), 'checkpoints/%s/models/best_model.t7' % (args.exp_name))
            torch.save(model.feature_model.state_dict(), 'checkpoints/%s/models/best_ptnet_model.t7' % (args.exp_name))

        # Latest-state snapshots saved every epoch.
        if snap is not None:
            torch.save(snap, 'checkpoints/%s/models/model_snap.t7' % (args.exp_name))
        torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % (args.exp_name))
        torch.save(model.feature_model.state_dict(), 'checkpoints/%s/models/ptnet_model.t7' % (args.exp_name))

        boardio.add_scalar('Train Loss', train_loss, epoch + 1)
        boardio.add_scalar('Test Loss', test_loss, epoch + 1)
        boardio.add_scalar('Best Test Loss', best_test_loss, epoch + 1)

        textio.cprint('EPOCH:: %d, Training Loss: %f, Testing Loss: %f, Best Loss: %f' % (epoch + 1, train_loss, test_loss, best_test_loss))
136
+
137
def _str2bool(value):
    """argparse type for booleans.

    ``type=bool`` treats ANY non-empty string as True — so ``--eval False``
    silently enabled evaluation. Parse the text explicitly; ``--eval True``
    keeps working.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)


def options():
    """Parse command-line options for training iPCRNet.

    Returns:
        argparse.Namespace with all training settings.
    """
    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--exp_name', type=str, default='exp_ipcrnet', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--dataset_path', type=str, default='ModelNet40',
                        metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
    parser.add_argument('--eval', type=_str2bool, default=False, help='Train or Evaluate the network.')

    # settings for input data
    parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
                        metavar='DATASET', help='dataset type (default: modelnet)')
    parser.add_argument('--num_points', default=1024, type=int,
                        metavar='N', help='points in point-cloud (default: 1024)')

    # settings for PointNet
    parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
                        help='train pointnet (default: tune)')
    parser.add_argument('--emb_dims', default=1024, type=int,
                        metavar='K', help='dim. of the feature vector (default: 1024)')
    parser.add_argument('--symfn', default='max', choices=['max', 'avg'],
                        help='symmetric function (default: max)')

    # settings for on training
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('-j', '--workers', default=4, type=int,
                        metavar='N', help='number of data loading workers (default: 4)')
    parser.add_argument('-b', '--batch_size', default=20, type=int,
                        metavar='N', help='mini-batch size (default: 32)')
    parser.add_argument('--epochs', default=200, type=int,
                        metavar='N', help='number of total epochs to run')
    parser.add_argument('--start_epoch', default=0, type=int,
                        metavar='N', help='manual epoch number (useful on restarts)')
    parser.add_argument('--optimizer', default='Adam', choices=['Adam', 'SGD'],
                        metavar='METHOD', help='name of an optimizer (default: Adam)')
    parser.add_argument('--resume', default='', type=str,
                        metavar='PATH', help='path to latest checkpoint (default: null (no-use))')
    parser.add_argument('--pretrained', default='', type=str,
                        metavar='PATH', help='path to pretrained model file (default: null (no-use))')
    parser.add_argument('--device', default='cuda:0', type=str,
                        metavar='DEVICE', help='use CUDA if available')

    args = parser.parse_args()
    return args
180
+
181
def main():
    """Entry point: parse options, seed RNGs, build data/model, then train or eval."""
    args = options()

    # Deterministic seeding for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
    _init_(args)

    textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
    textio.cprint(str(args))

    # Registration pairs generated from ModelNet40 with PCRNet-style transforms.
    trainset = RegistrationData('PCRNet', ModelNet40Data(train=True))
    testset = RegistrationData('PCRNet', ModelNet40Data(train=False))
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
    test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)

    if not torch.cuda.is_available():
        args.device = 'cpu'
    args.device = torch.device(args.device)

    # PointNet backbone wrapped by the iterative PCRNet registration head.
    ptnet = PointNet(emb_dims=args.emb_dims)
    model = iPCRNet(feature_model=ptnet)
    model = model.to(args.device)

    checkpoint = None
    if args.resume:
        assert os.path.isfile(args.resume)
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])

    if args.pretrained:
        assert os.path.isfile(args.pretrained)
        model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
    model.to(args.device)

    if args.eval:
        test(args, model, test_loader, textio)
    else:
        train(args, model, train_loader, test_loader, boardio, textio, checkpoint)

if __name__ == '__main__':
    main()
@@ -0,0 +1,245 @@
1
+ import argparse
2
+ import os
3
+ import sys
4
+ import logging
5
+ import numpy
6
+ import numpy as np
7
+ import torch
8
+ import torch.utils.data
9
+ import torchvision
10
+ from torch.utils.data import DataLoader
11
+ from tensorboardX import SummaryWriter
12
+ from tqdm import tqdm
13
+
14
+ # Only if the files are in example folder.
15
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
16
+ if BASE_DIR[-8:] == 'examples':
17
+ sys.path.append(os.path.join(BASE_DIR, os.pardir))
18
+ os.chdir(os.path.join(BASE_DIR, os.pardir))
19
+
20
+ from learning3d.models import create_pointconv
21
+ from learning3d.models import Classifier
22
+ from learning3d.data_utils import ClassificationData, ModelNet40Data
23
+
24
def _init_(args):
    """Create the checkpoint directory tree and back up the entry scripts.

    Args:
        args: parsed options; only ``args.exp_name`` is used.
    """
    import shutil

    # exist_ok collapses the original three separate existence checks.
    os.makedirs(os.path.join('checkpoints', args.exp_name, 'models'), exist_ok=True)

    # Best-effort backups; skip silently when the source files are absent
    # (matches the old silent os.system('cp ...') behavior, but portable).
    for src, dst in (('main.py', 'main.py.backup'), ('model.py', 'model.py.backup')):
        if os.path.isfile(src):
            shutil.copyfile(src, os.path.join('checkpoints', args.exp_name, dst))
33
+
34
+
35
class IOStream:
    """Dual-sink logger: every message goes to stdout and an append-only file."""

    def __init__(self, path):
        self.f = open(path, 'a')

    def cprint(self, text):
        """Write *text* to the console and the log file, flushing immediately."""
        print(text)
        self.f.write(text + '\n')
        # Immediate flush keeps the log usable if training dies mid-epoch.
        self.f.flush()

    def close(self):
        """Close the underlying file."""
        self.f.close()
46
+
47
def test_one_epoch(device, model, test_loader):
    """Evaluate the classifier over one pass of *test_loader*.

    Args:
        device: torch device the batches are moved to.
        model: classifier returning per-class scores of shape (B, num_classes).
        test_loader: iterable yielding (points, target) batches; target is (B, 1).

    Returns:
        (mean_loss, accuracy): NLL loss averaged over samples, and top-1 accuracy.
    """
    model.eval()
    test_loss = 0.0
    pred = 0.0
    count = 0
    # Inference only: no_grad skips autograd bookkeeping.
    with torch.no_grad():
        for i, data in enumerate(tqdm(test_loader)):
            points, target = data
            target = target[:, 0]  # labels arrive as (B, 1); flatten to (B,)

            points = points.to(device)
            target = target.to(device)

            output = model(points)
            # 'size_average=False' was deprecated and later removed from
            # PyTorch; reduction='sum' is the exact equivalent (loss summed
            # over the batch, averaged below by sample count).
            loss_val = torch.nn.functional.nll_loss(
                torch.nn.functional.log_softmax(output, dim=1), target, reduction='sum')

            test_loss += loss_val.item()
            count += output.size(0)

            _, pred1 = output.max(dim=1)
            pred += (pred1 == target).sum().item()

    test_loss = float(test_loss) / count
    accuracy = float(pred) / count
    return test_loss, accuracy
74
+
75
def test(args, model, test_loader, textio):
    """Run one evaluation pass and report validation loss and accuracy."""
    loss, acc = test_one_epoch(args.device, model, test_loader)
    textio.cprint('Validation Loss: %f & Validation Accuracy: %f' % (loss, acc))
78
+
79
def train_one_epoch(device, model, train_loader, optimizer):
    """Train the classifier for one epoch.

    Args:
        device: torch device the batches are moved to.
        model: classifier returning per-class scores of shape (B, num_classes).
        train_loader: iterable yielding (points, target) batches; target is (B, 1).
        optimizer: torch optimizer stepping the model parameters.

    Returns:
        (mean_loss, accuracy) over all samples seen this epoch.
    """
    model.train()
    train_loss = 0.0
    pred = 0.0
    count = 0
    for i, data in enumerate(tqdm(train_loader)):
        points, target = data
        target = target[:, 0]  # labels arrive as (B, 1); flatten to (B,)

        points = points.to(device)
        target = target.to(device)

        output = model(points)
        # 'size_average=False' was deprecated and later removed from PyTorch;
        # reduction='sum' is the exact equivalent.
        loss_val = torch.nn.functional.nll_loss(
            torch.nn.functional.log_softmax(output, dim=1), target, reduction='sum')

        # backward + optimize
        optimizer.zero_grad()
        loss_val.backward()
        optimizer.step()

        train_loss += loss_val.item()
        count += output.size(0)

        _, pred1 = output.max(dim=1)
        pred += (pred1 == target).sum().item()

    train_loss = float(train_loss) / count
    accuracy = float(pred) / count
    return train_loss, accuracy
112
+
113
def train(args, model, train_loader, test_loader, boardio, textio, checkpoint):
    """Training loop for the PointConv classifier.

    Args:
        args: parsed options (device, optimizer, start_epoch, epochs, exp_name).
        model: Classifier; model.feature_model (the backbone) is saved separately.
        train_loader / test_loader: DataLoader instances.
        boardio: tensorboardX SummaryWriter.
        textio: IOStream logger.
        checkpoint: optional resume dict with 'min_loss' and 'optimizer' state.

    Fixes vs. the original:
    - checkpoint['min_loss'] is now restored into best_test_loss, so a resumed
      run cannot overwrite the best checkpoint with a worse model;
    - 'snap' is guarded against being unbound when no epoch has improved yet;
    - 'Traininig' typos in the log lines corrected.
    """
    learnable_params = filter(lambda p: p.requires_grad, model.parameters())
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(learnable_params)
    else:
        optimizer = torch.optim.SGD(learnable_params, lr=0.1)

    best_test_loss = np.inf
    if checkpoint is not None:
        best_test_loss = checkpoint['min_loss']
        optimizer.load_state_dict(checkpoint['optimizer'])

    snap = None
    for epoch in range(args.start_epoch, args.epochs):
        train_loss, train_accuracy = train_one_epoch(args.device, model, train_loader, optimizer)
        test_loss, test_accuracy = test_one_epoch(args.device, model, test_loader)

        if test_loss < best_test_loss:
            best_test_loss = test_loss
            snap = {'epoch': epoch + 1,
                    'model': model.state_dict(),
                    'min_loss': best_test_loss,
                    'optimizer': optimizer.state_dict()}
            torch.save(snap, 'checkpoints/%s/models/best_model_snap.t7' % (args.exp_name))
            torch.save(model.state_dict(), 'checkpoints/%s/models/best_model.t7' % (args.exp_name))
            torch.save(model.feature_model.state_dict(), 'checkpoints/%s/models/best_ptnet_model.t7' % (args.exp_name))

        # Latest-state snapshots saved every epoch.
        if snap is not None:
            torch.save(snap, 'checkpoints/%s/models/model_snap.t7' % (args.exp_name))
        torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % (args.exp_name))
        torch.save(model.feature_model.state_dict(), 'checkpoints/%s/models/ptnet_model.t7' % (args.exp_name))

        boardio.add_scalar('Train Loss', train_loss, epoch + 1)
        boardio.add_scalar('Test Loss', test_loss, epoch + 1)
        boardio.add_scalar('Best Test Loss', best_test_loss, epoch + 1)
        boardio.add_scalar('Train Accuracy', train_accuracy, epoch + 1)
        boardio.add_scalar('Test Accuracy', test_accuracy, epoch + 1)

        textio.cprint('EPOCH:: %d, Training Loss: %f, Testing Loss: %f, Best Loss: %f' % (epoch + 1, train_loss, test_loss, best_test_loss))
        textio.cprint('EPOCH:: %d, Training Accuracy: %f, Testing Accuracy: %f' % (epoch + 1, train_accuracy, test_accuracy))
152
+
153
def _str2bool(value):
    """argparse type for booleans.

    ``type=bool`` treats ANY non-empty string as True — so ``--eval False``
    silently enabled evaluation. Parse the text explicitly; ``--eval True``
    keeps working.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)


def options():
    """Parse command-line options for training the PointConv classifier.

    Returns:
        argparse.Namespace with all training settings.
    """
    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--exp_name', type=str, default='exp_classifier', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--dataset_path', type=str, default='ModelNet40',
                        metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
    parser.add_argument('--eval', type=_str2bool, default=False, help='Train or Evaluate the network.')

    # settings for input data
    parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
                        metavar='DATASET', help='dataset type (default: modelnet)')
    parser.add_argument('--num_points', default=1024, type=int,
                        metavar='N', help='points in point-cloud (default: 1024)')

    # settings for PointNet
    parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
                        help='train pointnet (default: tune)')
    parser.add_argument('--emb_dims', default=1024, type=int,
                        metavar='K', help='dim. of the feature vector (default: 1024)')
    parser.add_argument('--symfn', default='max', choices=['max', 'avg'],
                        help='symmetric function (default: max)')

    # settings for on training
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('-j', '--workers', default=4, type=int,
                        metavar='N', help='number of data loading workers (default: 4)')
    parser.add_argument('-b', '--batch_size', default=32, type=int,
                        metavar='N', help='mini-batch size (default: 32)')
    parser.add_argument('--epochs', default=200, type=int,
                        metavar='N', help='number of total epochs to run')
    parser.add_argument('--start_epoch', default=0, type=int,
                        metavar='N', help='manual epoch number (useful on restarts)')
    parser.add_argument('--optimizer', default='Adam', choices=['Adam', 'SGD'],
                        metavar='METHOD', help='name of an optimizer (default: Adam)')
    parser.add_argument('--resume', default='', type=str,
                        metavar='PATH', help='path to latest checkpoint (default: null (no-use))')
    parser.add_argument('--pretrained', default='', type=str,
                        metavar='PATH', help='path to pretrained model file (default: null (no-use))')
    parser.add_argument('--device', default='cuda:0', type=str,
                        metavar='DEVICE', help='use CUDA if available')

    args = parser.parse_args()
    return args
+ return args
196
+
197
def main():
    """Entry point: parse options, seed RNGs, build data/model, then train or eval."""
    args = options()
    # NOTE(review): this unconditionally overrides any --dataset_path given on
    # the command line; kept as-is since the surrounding scripts rely on it.
    args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')

    # Deterministic seeding for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
    _init_(args)

    textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
    textio.cprint(str(args))

    # ModelNet40 wrapped for classification-style batches.
    trainset = ClassificationData(ModelNet40Data(train=True))
    testset = ClassificationData(ModelNet40Data(train=False))
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
    test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)

    if not torch.cuda.is_available():
        args.device = 'cpu'
    args.device = torch.device(args.device)

    # PointConv backbone (features only) under a generic classification head.
    PointConv = create_pointconv(classifier=False, pretrained=None)
    ptconv = PointConv(emb_dims=args.emb_dims, classifier=False, pretrained=None)
    model = Classifier(feature_model=ptconv)

    checkpoint = None
    if args.resume:
        assert os.path.isfile(args.resume)
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])

    if args.pretrained:
        assert os.path.isfile(args.pretrained)
        model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
    model.to(args.device)

    if args.eval:
        test(args, model, test_loader, textio)
    else:
        train(args, model, train_loader, test_loader, boardio, textio, checkpoint)

if __name__ == '__main__':
    main()