learning3d 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. learning3d/__init__.py +0 -2
  2. learning3d/models/__init__.py +1 -6
  3. learning3d/utils/__init__.py +1 -6
  4. {learning3d-0.0.2.dist-info → learning3d-0.0.3.dist-info}/METADATA +1 -1
  5. {learning3d-0.0.2.dist-info → learning3d-0.0.3.dist-info}/RECORD +8 -43
  6. learning3d/examples/test_flownet.py +0 -113
  7. learning3d/examples/train_flownet.py +0 -259
  8. learning3d/models/flownet3d.py +0 -446
  9. learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so +0 -0
  10. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o +0 -0
  11. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o +0 -0
  12. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o +0 -0
  13. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o +0 -0
  14. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o +0 -0
  15. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o +0 -0
  16. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o +0 -0
  17. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o +0 -0
  18. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o +0 -0
  19. learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg +0 -0
  20. learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt +0 -14
  21. learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt +0 -1
  22. learning3d/utils/lib/pointnet2.egg-info/top_level.txt +0 -1
  23. learning3d/utils/lib/pointnet2_modules.py +0 -160
  24. learning3d/utils/lib/pointnet2_utils.py +0 -318
  25. learning3d/utils/lib/pytorch_utils.py +0 -236
  26. learning3d/utils/lib/setup.py +0 -23
  27. learning3d/utils/lib/src/ball_query.cpp +0 -25
  28. learning3d/utils/lib/src/ball_query_gpu.cu +0 -67
  29. learning3d/utils/lib/src/ball_query_gpu.h +0 -15
  30. learning3d/utils/lib/src/cuda_utils.h +0 -15
  31. learning3d/utils/lib/src/group_points.cpp +0 -36
  32. learning3d/utils/lib/src/group_points_gpu.cu +0 -86
  33. learning3d/utils/lib/src/group_points_gpu.h +0 -22
  34. learning3d/utils/lib/src/interpolate.cpp +0 -65
  35. learning3d/utils/lib/src/interpolate_gpu.cu +0 -233
  36. learning3d/utils/lib/src/interpolate_gpu.h +0 -36
  37. learning3d/utils/lib/src/pointnet2_api.cpp +0 -25
  38. learning3d/utils/lib/src/sampling.cpp +0 -46
  39. learning3d/utils/lib/src/sampling_gpu.cu +0 -253
  40. learning3d/utils/lib/src/sampling_gpu.h +0 -29
  41. {learning3d-0.0.2.dist-info → learning3d-0.0.3.dist-info}/LICENSE +0 -0
  42. {learning3d-0.0.2.dist-info → learning3d-0.0.3.dist-info}/WHEEL +0 -0
  43. {learning3d-0.0.2.dist-info → learning3d-0.0.3.dist-info}/top_level.txt +0 -0
learning3d/__init__.py CHANGED
@@ -1,2 +0,0 @@
1
- from .models import PointNet, create_pointconv, DGCNN, PPFNet, Pooling, Classifier, Segmentation
2
- from .models import DCP, PRNet, iPCRNet, PointNetLK, RPMNet, PCN, DeepGMR, MaskNet, MaskNet2
@@ -15,9 +15,4 @@ from .rpmnet import RPMNet
15
15
  from .pcn import PCN
16
16
  from .deepgmr import DeepGMR
17
17
  from .masknet import MaskNet
18
- from .masknet2 import MaskNet2
19
-
20
- try:
21
- from .flownet3d import FlowNet3D
22
- except:
23
- print("Error raised in pointnet2 module for FlowNet3D Network!\nEither don't use pointnet2_utils or retry it's setup.")
18
+ from .masknet2 import MaskNet2
@@ -1,9 +1,4 @@
1
1
  from .svd import SVDHead
2
2
  from .transformer import Transformer, Identity
3
3
  from .ppfnet_util import angle_difference, square_distance, index_points, farthest_point_sample, query_ball_point, sample_and_group, sample_and_group_multi
4
- from .pointconv_util import PointConvDensitySetAbstraction
5
-
6
- try:
7
- from .lib import pointnet2_utils
8
- except:
9
- print("Error raised in pointnet2 module in utils!\nEither don't use pointnet2_utils or retry it's setup.")
4
+ from .pointconv_util import PointConvDensitySetAbstraction
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: learning3d
3
- Version: 0.0.2
3
+ Version: 0.0.3
4
4
  Summary: Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data
5
5
  Author-email: Vinit Sarode <vinitsarode5@gmail.com>
6
6
  Project-URL: Homepage, https://github.com/vinits5/learning3d
@@ -1,10 +1,9 @@
1
- learning3d/__init__.py,sha256=WsLbk69HZ7OXJwTpNFcEphgtNp4sqEVGbZ1HGeNdYZ0,189
1
+ learning3d/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
2
  learning3d/data_utils/__init__.py,sha256=iYAVh0FThnVlG42QIgmDYrC3NGVYuzKX8s1oRqAI1YU,261
3
3
  learning3d/data_utils/dataloaders.py,sha256=kb0wsLlMN7sB-CS_4BGSprSaZBwkUNYxS5iwUdD6JJM,14871
4
4
  learning3d/data_utils/user_data.py,sha256=ADDGeCUCr6TcXhcxvAFncIeLO71xoRHYi4H418ktvQs,4828
5
5
  learning3d/examples/test_dcp.py,sha256=o1hgl22b2xhqqYe4_k7NlNo3-rIMmSzzssCI74DCfoE,5606
6
6
  learning3d/examples/test_deepgmr.py,sha256=pb-gRE6YLLaPFmiBXQjaE-B-J314QY4RkNwLHx65bcU,5597
7
- learning3d/examples/test_flownet.py,sha256=52i9UOnSpCxZXrCgYJ-HNxsxRXqr_K4PydYMF90Lxv8,4603
8
7
  learning3d/examples/test_masknet.py,sha256=dkqUui9sv2SzHtvtzUzL_PxJxMcBMqCSDPAYg0BWAVU,6405
9
8
  learning3d/examples/test_masknet2.py,sha256=3_XWBQOwQjK3BCQ__BmPhCvYI_0hZMK3X4C-P2Krw6w,6859
10
9
  learning3d/examples/test_pcn.py,sha256=4eaosjJVqiFxlqnaUWu-O2Jawt4uU16UJEzitIjP2us,4342
@@ -17,7 +16,6 @@ learning3d/examples/test_rpmnet.py,sha256=oy-z7I26IQxr4TD_p0qCRnOn6H8VbQFyiWO83Z
17
16
  learning3d/examples/train_PointNetLK.py,sha256=0GgT2NYKNZl8o02rvW-nYBO_1tlfDNuakuAXtm1V16c,8773
18
17
  learning3d/examples/train_dcp.py,sha256=SQVrwnZqGmFCZv_X2tzMysBmv-HI9sllZMWw5zsW3NM,9511
19
18
  learning3d/examples/train_deepgmr.py,sha256=vxdkgfQZPtwuYryR0chegTiLuXOQag8r_ccGJ6qtw7o,9397
20
- learning3d/examples/train_flownet.py,sha256=V3uG7EaqsQO0BtmAFFN_aHb5bsyYoLv3JKb1_XhYKNw,10369
21
19
  learning3d/examples/train_masknet.py,sha256=XzgWsmVAm5Lk21mH9qhvNN0um4pI1fYVfsBAV4deSOM,8889
22
20
  learning3d/examples/train_pcn.py,sha256=X7MSYVXwgIMExplua1M9pG20eNhZ_0p83yTADSYrAlA,7542
23
21
  learning3d/examples/train_pcrnet.py,sha256=KQ8MiDUiR46qS9t7tc5POJ3NjMyZFBEPOVQY-7Vszpk,8198
@@ -45,12 +43,11 @@ learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py,sha256=aDpfP0iZyg3Uu-ppa3
45
43
  learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py,sha256=yCEEfafLZ1Ia_BCrE7mcnDRDaaEj6je3Rj8cnQ_Zrts,1019
46
44
  learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp,sha256=lQ4q2XO5K2CldYmnRJnGhKTR9hVRFTwO305bHT3Cauo,17
47
45
  learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu,sha256=DJXALRWyiVDzaKWsD2tQnEXrIT9GpIldkvIJ9fZMGbI,1462
48
- learning3d/models/__init__.py,sha256=r5upYkq5M4pu7rxuFxFI5AshVGQI-gMMsQRm_iF3TuE,642
46
+ learning3d/models/__init__.py,sha256=WgAx7FlaUbijjl3qjQCvwyrbdhxvKHV7HbqBlq47ux8,473
49
47
  learning3d/models/classifier.py,sha256=_LUNXbLrpKNXmCkO2R1mz64dbwfrze7f_4SYT1Z6SYo,1205
50
48
  learning3d/models/dcp.py,sha256=LZFgtk9f9f9s3QvX65nFXGgC33yGIZuy4XjviwH8OGE,3377
51
49
  learning3d/models/deepgmr.py,sha256=vIxOQrZjvOCHLElJCjZ8EcZ-vm0-v71IKsPGuSF-elE,5298
52
50
  learning3d/models/dgcnn.py,sha256=Bt-dP2NwpOy4AcWrspXfVV1EKL-ZQElYUp2uaWNvE_Q,3057
53
- learning3d/models/flownet3d.py,sha256=2cPqzwXyw5uBNWIpHLTwRf0exSHYcW2Lyd94wOHgXZ0,17667
54
51
  learning3d/models/masknet.py,sha256=ElMF3b-JgYmgwSEf1taGQvhA7Xy7_MiHEofzc03VCd8,2705
55
52
  learning3d/models/masknet2.py,sha256=6lgukurfzUOY-6xdCpMljOYFtvADLSczAXJzRC3Jkh4,9063
56
53
  learning3d/models/pcn.py,sha256=FvpjLR6t3kFQ1I4Fhpbsaj_P8Ml6S912x36JAZ1dUKs,5346
@@ -71,45 +68,13 @@ learning3d/ops/se3.py,sha256=x6oLbQzLOXiM0xDJsVUCUE1liZ_TaJzkkHQvIyjqCqI,3957
71
68
  learning3d/ops/sinc.py,sha256=A_Ffu07RXGx3daZn4zOGdnW10_l06cmhFdAiU4NKhcw,5228
72
69
  learning3d/ops/so3.py,sha256=b0tX5nHyF2Qtp8V0ejGKaPaHJQ_G38ifQ7gSJzRU1ts,5166
73
70
  learning3d/ops/transform_functions.py,sha256=hvNjZO-uJodsGYtQwtAtDxtQ6uBpA7Lv9t-_yAg6wxo,12806
74
- learning3d/utils/__init__.py,sha256=iuYyToRcZ9YZMNn-ngJMjzU8p4FkaO2m8YyXSE14hN4,442
71
+ learning3d/utils/__init__.py,sha256=lwMtW0H4LEZJuoVCi7qmfaWdOVrL2YmP_YP-ZfUoSlc,286
75
72
  learning3d/utils/pointconv_util.py,sha256=kJxGztai7X15YsGuorMOc50SPtj_k1yfkP4XCTzIWdM,14331
76
73
  learning3d/utils/ppfnet_util.py,sha256=HEoxkgUBlawKZLWspfQm3caWUyAMIrW-ECtStNYbe2Y,7989
77
74
  learning3d/utils/svd.py,sha256=yCYQt2SKqeIzCBnBEr_8xFR79m4fIoNVFnp77epn1dM,1936
78
75
  learning3d/utils/transformer.py,sha256=UDgJvnh7ekWyijaAn-a3ckeFeMxlK_chXzWlhAGDiPM,8974
79
- learning3d/utils/lib/pointnet2_modules.py,sha256=Tqiz32BT-fc2gHmv87xbbNUctE2mEpYpnJ3jfVmqw0w,6339
80
- learning3d/utils/lib/pointnet2_utils.py,sha256=TQOVhi22raBffgtKWsVf-reoEqPkN-zvv6GOh2TqHxY,10637
81
- learning3d/utils/lib/pytorch_utils.py,sha256=-kJfrswEu0gnnrYM1yIrj6HFW2YVyK2P9To4EyYAqNw,6173
82
- learning3d/utils/lib/setup.py,sha256=AF3MiatsAYYEIl26Vt5JlsmZEI19lV86k6b4fw1uxgU,679
83
- learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so,sha256=HjZKllvXRMLn1ONxFPA-RF2athcY06clU-D91CLS4Dw,7222568
84
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o,sha256=_jT_UVj61Yi2eDgpn1gn595TTqbQ8hAERAazE9kcuwA,2730016
85
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o,sha256=GFosEyxTG6gPBUZJ7niNojLxJvt12v2cnO0lqtnO3mI,13912
86
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o,sha256=kNMDj14OwT70OtmCf63mdtnf58tXpnKXe95DOHu63Ow,2490896
87
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o,sha256=CCX8QD24Kp4hJqUFG34e88Co5poWc6XvkARQYpBgs88,17048
88
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o,sha256=xyOxGf0zrgX8k6Muvn4WgGuZWk0N1-E7r4Fnjpao-sg,2503232
89
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o,sha256=4kAHyU-Ho3_Sr77NWJINzVndHu8PdirIUqm0tQ7yVY4,32712
90
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o,sha256=L8v-YcL3J53kKxFw50frNlsKl1n4HowU5krlscSZ-5E,7242784
91
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o,sha256=lOw0IlJMXjCbTw9LONz7_TEzjWl484WFzilyzW742Io,2495440
92
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o,sha256=ULYz2ppU_n0KU8ka0Q4FE9wdPUuZDYcmvxRWN8UgUnU,95544
93
- learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg,sha256=zwF5_pf6Id5lVow5MGkdug_upjMpbEwW7hUTIHEub0I,2241340
94
- learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt,sha256=XtaY4SQXYOT8Hhdjw4iworVxoDjoWdmdBe5fpkwK_-c,328
95
- learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
96
- learning3d/utils/lib/pointnet2.egg-info/top_level.txt,sha256=XVeKz4zj8Ixbw0jjiXuOqdON71dDXExnItEZVUaOdVE,15
97
- learning3d/utils/lib/src/ball_query.cpp,sha256=Ysx8OAn4G-dSrlPgbZS2WCDfxcV54HQTZu5FbL2QlKs,933
98
- learning3d/utils/lib/src/ball_query_gpu.cu,sha256=vj-Y6gUh2oVLLYTuz8JE8tcHxGrJBTC4pTRsNcWNa1o,2050
99
- learning3d/utils/lib/src/ball_query_gpu.h,sha256=47MXh8U46wcpvQ7wcC05QyAsE1N3GB-TExsT0hoApxE,476
100
- learning3d/utils/lib/src/cuda_utils.h,sha256=es4AGc6gYXileyddOxCGsmzV0Mg9ZG6LiucMUw6ee8o,353
101
- learning3d/utils/lib/src/group_points.cpp,sha256=koGBIklYmbwHDpCrHPURJl_f1AtsqpNB2DbgRRb4zuc,1171
102
- learning3d/utils/lib/src/group_points_gpu.cu,sha256=OIa_C0egafVIvJTcfu7UUI-E4ylxktcSG-fUjJQbNow,3307
103
- learning3d/utils/lib/src/group_points_gpu.h,sha256=lNX0VSo6xFCfWQh-ZPUIBobu2kfmFz8PF8l2zZe1NyQ,836
104
- learning3d/utils/lib/src/interpolate.cpp,sha256=VKMC4dveIgdfTiqpjrhe5CMqW097Hgmza5PkZVKVUfc,2521
105
- learning3d/utils/lib/src/interpolate_gpu.cu,sha256=yOHn2D5MFonqrfLD7K7-DFozXqSDqdetdaFHJzsPMKs,7470
106
- learning3d/utils/lib/src/interpolate_gpu.h,sha256=SbxS1oY6Qrp3f-r_kDtjq5_EzRpdIZpJIGVfmhhmnsI,1477
107
- learning3d/utils/lib/src/pointnet2_api.cpp,sha256=LRvlZoDLpaZHg2Tvb2ad5j8qVNCZ7TX9XEsNfdlSpT8,1213
108
- learning3d/utils/lib/src/sampling.cpp,sha256=kH75rE1jxri4v8wIksKAjaUbJlXB3FLEXYQTmb0kmII,1549
109
- learning3d/utils/lib/src/sampling_gpu.cu,sha256=V9dTFaYksQ-jNnoUvQgRNuFW9MmaBdZpbmXQbnvShJc,7934
110
- learning3d/utils/lib/src/sampling_gpu.h,sha256=STr6hTB9A8D0skTTR6TiU79j1eSc-FRqik_j0PPWDmM,1045
111
- learning3d-0.0.2.dist-info/LICENSE,sha256=3qY3_NeQIvalbLlsHFtOfuUKjs_U2k6u7rf6YVx6ac0,1098
112
- learning3d-0.0.2.dist-info/METADATA,sha256=hb8g3GL1qvPFFtblCBP_RaVPimnlrkqbv0nR4fIHLc4,15813
113
- learning3d-0.0.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
114
- learning3d-0.0.2.dist-info/top_level.txt,sha256=nTmYW8NhbNV1_15DGNpl_OvvSFtQP98sy3qrrHr0eLo,11
115
- learning3d-0.0.2.dist-info/RECORD,,
76
+ learning3d-0.0.3.dist-info/LICENSE,sha256=3qY3_NeQIvalbLlsHFtOfuUKjs_U2k6u7rf6YVx6ac0,1098
77
+ learning3d-0.0.3.dist-info/METADATA,sha256=RjRxCmF_3xda4kPWDCvP2QjyPSysHZdNm3PY42-Jlhg,15813
78
+ learning3d-0.0.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
79
+ learning3d-0.0.3.dist-info/top_level.txt,sha256=nTmYW8NhbNV1_15DGNpl_OvvSFtQP98sy3qrrHr0eLo,11
80
+ learning3d-0.0.3.dist-info/RECORD,,
@@ -1,113 +0,0 @@
1
- #!/usr/bin/env python
2
- # -*- coding: utf-8 -*-
3
-
4
-
5
- import open3d as o3d
6
- import os
7
- import gc
8
- import argparse
9
- import torch
10
- import torch.nn as nn
11
- import torch.nn.functional as F
12
- import torch.optim as optim
13
- from torch.optim.lr_scheduler import MultiStepLR
14
- from learning3d.models import FlowNet3D
15
- from learning3d.data_utils import SceneflowDataset
16
- import numpy as np
17
- from torch.utils.data import DataLoader
18
- from tensorboardX import SummaryWriter
19
- from tqdm import tqdm
20
-
21
- def display_open3d(template, source, transformed_source):
22
- template_ = o3d.geometry.PointCloud()
23
- source_ = o3d.geometry.PointCloud()
24
- transformed_source_ = o3d.geometry.PointCloud()
25
- template_.points = o3d.utility.Vector3dVector(template)
26
- source_.points = o3d.utility.Vector3dVector(source + np.array([0,0.5,0.5]))
27
- transformed_source_.points = o3d.utility.Vector3dVector(transformed_source)
28
- template_.paint_uniform_color([1, 0, 0])
29
- source_.paint_uniform_color([0, 1, 0])
30
- transformed_source_.paint_uniform_color([0, 0, 1])
31
- o3d.visualization.draw_geometries([template_, source_, transformed_source_])
32
-
33
- def test_one_epoch(args, net, test_loader):
34
- net.eval()
35
-
36
- total_loss = 0
37
- num_examples = 0
38
- for i, data in enumerate(tqdm(test_loader)):
39
- data = [d.to(args.device) for d in data]
40
- pc1, pc2, color1, color2, flow, mask1 = data
41
- pc1 = pc1.transpose(2,1).contiguous()
42
- pc2 = pc2.transpose(2,1).contiguous()
43
- color1 = color1.transpose(2,1).contiguous()
44
- color2 = color2.transpose(2,1).contiguous()
45
- flow = flow
46
- mask1 = mask1.float()
47
-
48
- batch_size = pc1.size(0)
49
- num_examples += batch_size
50
- flow_pred = net(pc1, pc2, color1, color2).permute(0,2,1)
51
- loss_1 = torch.mean(mask1 * torch.sum((flow_pred - flow) * (flow_pred - flow), -1) / 2.0)
52
-
53
- pc1, pc2 = pc1.permute(0,2,1), pc2.permute(0,2,1)
54
- pc1_ = pc1 - flow_pred
55
- print("Loss: ", loss_1)
56
- display_open3d(pc1.detach().cpu().numpy()[0], pc2.detach().cpu().numpy()[0], pc1_.detach().cpu().numpy()[0])
57
- total_loss += loss_1.item() * batch_size
58
-
59
- return total_loss * 1.0 / num_examples
60
-
61
-
62
- def test(args, net, test_loader):
63
- test_loss = test_one_epoch(args, net, test_loader)
64
-
65
- def main():
66
- parser = argparse.ArgumentParser(description='Point Cloud Registration')
67
- parser.add_argument('--model', type=str, default='flownet', metavar='N',
68
- choices=['flownet'], help='Model to use, [flownet]')
69
- parser.add_argument('--emb_dims', type=int, default=512, metavar='N',
70
- help='Dimension of embeddings')
71
- parser.add_argument('--num_points', type=int, default=2048,
72
- help='Point Number [default: 2048]')
73
- parser.add_argument('--test_batch_size', type=int, default=1, metavar='batch_size',
74
- help='Size of batch)')
75
-
76
- parser.add_argument('--gaussian_noise', type=bool, default=False, metavar='N',
77
- help='Wheter to add gaussian noise')
78
- parser.add_argument('--unseen', type=bool, default=False, metavar='N',
79
- help='Whether to test on unseen category')
80
- parser.add_argument('--dataset', type=str, default='SceneflowDataset',
81
- choices=['SceneflowDataset'], metavar='N',
82
- help='dataset to use')
83
- parser.add_argument('--dataset_path', type=str, default='data_processed_maxcut_35_20k_2k_8192', metavar='N',
84
- help='dataset to use')
85
- parser.add_argument('--pretrained', type=str, default='learning3d/pretrained/exp_flownet/models/model.best.t7', metavar='N',
86
- help='Pretrained model path')
87
- parser.add_argument('--device', default='cuda:0', type=str,
88
- metavar='DEVICE', help='use CUDA if available')
89
-
90
- args = parser.parse_args()
91
- if not torch.cuda.is_available():
92
- args.device = torch.device('cpu')
93
- else:
94
- args.device = torch.device('cuda')
95
-
96
- if args.dataset == 'SceneflowDataset':
97
- test_loader = DataLoader(
98
- SceneflowDataset(npoints=args.num_points, partition='test'),
99
- batch_size=args.test_batch_size, shuffle=False, drop_last=False)
100
- else:
101
- raise Exception("not implemented")
102
-
103
- net = FlowNet3D()
104
- assert os.path.exists(args.pretrained), "Pretrained Model Doesn't Exists!"
105
- net.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
106
- net = net.to(args.device)
107
-
108
- test(args, net, test_loader)
109
- print('FINISH')
110
-
111
-
112
- if __name__ == '__main__':
113
- main()
@@ -1,259 +0,0 @@
1
- #!/usr/bin/env python
2
- # -*- coding: utf-8 -*-
3
-
4
-
5
- from __future__ import print_function
6
- import os
7
- import gc
8
- import argparse
9
- import torch
10
- import torch.nn as nn
11
- import torch.nn.functional as F
12
- import torch.optim as optim
13
- from torch.optim.lr_scheduler import MultiStepLR
14
- from learning3d.models import FlowNet3D
15
- from learning3d.data_utils import SceneflowDataset
16
- import numpy as np
17
- from torch.utils.data import DataLoader
18
- from tensorboardX import SummaryWriter
19
- from tqdm import tqdm
20
-
21
- class IOStream:
22
- def __init__(self, path):
23
- self.f = open(path, 'a')
24
-
25
- def cprint(self, text):
26
- print(text)
27
- self.f.write(text + '\n')
28
- self.f.flush()
29
-
30
- def close(self):
31
- self.f.close()
32
-
33
-
34
- def _init_(args):
35
- if not os.path.exists('checkpoints'):
36
- os.makedirs('checkpoints')
37
- if not os.path.exists('checkpoints/' + args.exp_name):
38
- os.makedirs('checkpoints/' + args.exp_name)
39
- if not os.path.exists('checkpoints/' + args.exp_name + '/' + 'models'):
40
- os.makedirs('checkpoints/' + args.exp_name + '/' + 'models')
41
-
42
- def weights_init(m):
43
- classname=m.__class__.__name__
44
- if classname.find('Conv2d') != -1:
45
- nn.init.kaiming_normal_(m.weight.data)
46
- if classname.find('Conv1d') != -1:
47
- nn.init.kaiming_normal_(m.weight.data)
48
-
49
- def test_one_epoch(args, net, test_loader):
50
- net.eval()
51
-
52
- total_loss = 0
53
- num_examples = 0
54
- for i, data in tqdm(enumerate(test_loader), total=len(test_loader), smoothing=0.9):
55
- pc1, pc2, color1, color2, flow, mask1 = data
56
- pc1 = pc1.cuda().transpose(2,1).contiguous()
57
- pc2 = pc2.cuda().transpose(2,1).contiguous()
58
- color1 = color1.cuda().transpose(2,1).contiguous()
59
- color2 = color2.cuda().transpose(2,1).contiguous()
60
- flow = flow.cuda()
61
- mask1 = mask1.cuda().float()
62
-
63
- batch_size = pc1.size(0)
64
- num_examples += batch_size
65
- flow_pred = net(pc1, pc2, color1, color2).permute(0,2,1)
66
- loss_1 = torch.mean(mask1 * torch.sum((flow_pred - flow) * (flow_pred - flow), -1) / 2.0)
67
-
68
- pc1, pc2 = pc1.permute(0,2,1), pc2.permute(0,2,1)
69
- pc1_ = pc1 + flow_pred
70
-
71
- total_loss += loss_1.item() * batch_size
72
-
73
-
74
- return total_loss * 1.0 / num_examples
75
-
76
-
77
- def train_one_epoch(args, net, train_loader, opt):
78
- net.train()
79
- num_examples = 0
80
- total_loss = 0
81
- for i, data in tqdm(enumerate(train_loader), total=len(train_loader), smoothing=0.9):
82
- pc1, pc2, color1, color2, flow, mask1 = data
83
- pc1 = pc1.cuda().transpose(2,1).contiguous()
84
- pc2 = pc2.cuda().transpose(2,1).contiguous()
85
- color1 = color1.cuda().transpose(2,1).contiguous()
86
- color2 = color2.cuda().transpose(2,1).contiguous()
87
- flow = flow.cuda().transpose(2,1).contiguous()
88
- mask1 = mask1.cuda().float()
89
-
90
- batch_size = pc1.size(0)
91
- opt.zero_grad()
92
- num_examples += batch_size
93
- flow_pred = net(pc1, pc2, color1, color2)
94
- loss_1 = torch.mean(mask1 * torch.sum((flow_pred - flow) ** 2, 1) / 2.0)
95
-
96
- pc1, pc2, flow_pred = pc1.permute(0,2,1), pc2.permute(0,2,1), flow_pred.permute(0,2,1)
97
- pc1_ = pc1 + flow_pred
98
-
99
- loss_1.backward()
100
-
101
- opt.step()
102
- total_loss += loss_1.item() * batch_size
103
-
104
- # if (i+1) % 100 == 0:
105
- # print("batch: %d, mean loss: %f" % (i, total_loss / 100 / batch_size))
106
- # total_loss = 0
107
- return total_loss * 1.0 / num_examples
108
-
109
-
110
- def test(args, net, test_loader, boardio, textio):
111
-
112
- test_loss = test_one_epoch(args, net, test_loader)
113
-
114
- textio.cprint('==FINAL TEST==')
115
- textio.cprint('mean test loss: %f'%test_loss)
116
-
117
-
118
- def train(args, net, train_loader, test_loader, boardio, textio):
119
- if args.use_sgd:
120
- print("Use SGD")
121
- opt = optim.SGD(net.parameters(), lr=args.lr * 100, momentum=args.momentum, weight_decay=1e-4)
122
- else:
123
- print("Use Adam")
124
- opt = optim.Adam(net.parameters(), lr=args.lr, weight_decay=1e-4)
125
- scheduler = MultiStepLR(opt, milestones=[75, 150, 200], gamma=0.1)
126
-
127
- best_test_loss = np.inf
128
- for epoch in range(args.epochs):
129
- scheduler.step()
130
- textio.cprint('==epoch: %d=='%epoch)
131
- train_loss = train_one_epoch(args, net, train_loader, opt)
132
- textio.cprint('mean train EPE loss: %f'%train_loss)
133
-
134
- test_loss = test_one_epoch(args, net, test_loader)
135
- textio.cprint('mean test EPE loss: %f'%test_loss)
136
-
137
- if best_test_loss >= test_loss:
138
- best_test_loss = test_loss
139
- textio.cprint('best test loss till now: %f'%test_loss)
140
- if torch.cuda.device_count() > 1:
141
- torch.save(net.module.state_dict(), 'checkpoints/%s/models/model.best.t7' % args.exp_name)
142
- else:
143
- torch.save(net.state_dict(), 'checkpoints/%s/models/model.best.t7' % args.exp_name)
144
-
145
- boardio.add_scalar('Train Loss', train_loss, epoch+1)
146
- boardio.add_scalar('Test Loss', test_loss, epoch+1)
147
- boardio.add_scalar('Best Test Loss', best_test_loss, epoch+1)
148
-
149
- if torch.cuda.device_count() > 1:
150
- torch.save(net.module.state_dict(), 'checkpoints/%s/models/model.%d.t7' % (args.exp_name, epoch))
151
- else:
152
- torch.save(net.state_dict(), 'checkpoints/%s/models/model.%d.t7' % (args.exp_name, epoch))
153
- gc.collect()
154
-
155
-
156
- def main():
157
- parser = argparse.ArgumentParser(description='Point Cloud Registration')
158
- parser.add_argument('--exp_name', type=str, default='exp_flownet', metavar='N',
159
- help='Name of the experiment')
160
- parser.add_argument('--model', type=str, default='flownet', metavar='N',
161
- choices=['flownet'],
162
- help='Model to use, [flownet]')
163
- parser.add_argument('--emb_dims', type=int, default=512, metavar='N',
164
- help='Dimension of embeddings')
165
- parser.add_argument('--num_points', type=int, default=2048,
166
- help='Point Number [default: 2048]')
167
- parser.add_argument('--dropout', type=float, default=0.5, metavar='N',
168
- help='Dropout ratio in transformer')
169
- parser.add_argument('--batch_size', type=int, default=16, metavar='batch_size',
170
- help='Size of batch)')
171
- parser.add_argument('--test_batch_size', type=int, default=10, metavar='batch_size',
172
- help='Size of batch)')
173
- parser.add_argument('--epochs', type=int, default=250, metavar='N',
174
- help='number of episode to train ')
175
- parser.add_argument('--use_sgd', action='store_true', default=True,
176
- help='Use SGD')
177
- parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
178
- help='learning rate (default: 0.001, 0.1 if using sgd)')
179
- parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
180
- help='SGD momentum (default: 0.9)')
181
- parser.add_argument('--no_cuda', action='store_true', default=False,
182
- help='enables CUDA training')
183
- parser.add_argument('--seed', type=int, default=1234, metavar='S',
184
- help='random seed (default: 1)')
185
- parser.add_argument('--eval', action='store_true', default=False,
186
- help='evaluate the model')
187
- parser.add_argument('--cycle', type=bool, default=False, metavar='N',
188
- help='Whether to use cycle consistency')
189
- parser.add_argument('--gaussian_noise', type=bool, default=False, metavar='N',
190
- help='Wheter to add gaussian noise')
191
- parser.add_argument('--unseen', type=bool, default=False, metavar='N',
192
- help='Whether to test on unseen category')
193
- parser.add_argument('--dataset', type=str, default='SceneflowDataset',
194
- choices=['SceneflowDataset'], metavar='N',
195
- help='dataset to use')
196
- parser.add_argument('--dataset_path', type=str, default='data_processed_maxcut_35_20k_2k_8192', metavar='N',
197
- help='dataset to use')
198
- parser.add_argument('--model_path', type=str, default='', metavar='N',
199
- help='Pretrained model path')
200
- parser.add_argument('--pretrained', type=str, default='', metavar='N',
201
- help='Pretrained model path')
202
-
203
- args = parser.parse_args()
204
- os.environ['CUDA_VISIBLE_DEVICES'] = '0'
205
- # CUDA settings
206
- torch.backends.cudnn.deterministic = True
207
- torch.manual_seed(args.seed)
208
- torch.cuda.manual_seed_all(args.seed)
209
- np.random.seed(args.seed)
210
-
211
- boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
212
- _init_(args)
213
-
214
- textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
215
- textio.cprint(str(args))
216
-
217
- if args.dataset == 'SceneflowDataset':
218
- train_loader = DataLoader(
219
- SceneflowDataset(npoints=args.num_points, partition='train'),
220
- batch_size=args.batch_size, shuffle=True, drop_last=True)
221
- test_loader = DataLoader(
222
- SceneflowDataset(npoints=args.num_points, partition='test'),
223
- batch_size=args.test_batch_size, shuffle=False, drop_last=False)
224
- else:
225
- raise Exception("not implemented")
226
-
227
- if args.model == 'flownet':
228
- net = FlowNet3D().cuda()
229
- net.apply(weights_init)
230
- if args.pretrained:
231
- net.load_state_dict(torch.load(args.pretrained), strict=False)
232
- print("Pretrained Model Loaded Successfully!")
233
- if args.eval:
234
- if args.model_path is '':
235
- model_path = 'checkpoints' + '/' + args.exp_name + '/models/model.best.t7'
236
- else:
237
- model_path = args.model_path
238
- print(model_path)
239
- if not os.path.exists(model_path):
240
- print("can't find pretrained model")
241
- return
242
- net.load_state_dict(torch.load(model_path), strict=False)
243
- if torch.cuda.device_count() > 1:
244
- net = nn.DataParallel(net)
245
- print("Let's use", torch.cuda.device_count(), "GPUs!")
246
- else:
247
- raise Exception('Not implemented')
248
- if args.eval:
249
- test(args, net, test_loader, boardio, textio)
250
- else:
251
- train(args, net, train_loader, test_loader, boardio, textio)
252
-
253
-
254
- print('FINISH')
255
- # boardio.close()
256
-
257
-
258
- if __name__ == '__main__':
259
- main()