learning3d 0.1.0__tar.gz → 0.2.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- learning3d-0.2.1/MANIFEST.in +4 -0
- {learning3d-0.1.0/src/learning3d.egg-info → learning3d-0.2.1}/PKG-INFO +57 -12
- {learning3d-0.1.0 → learning3d-0.2.1}/README.md +31 -12
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/data_utils/dataloaders.py +16 -14
- learning3d-0.2.1/learning3d/examples/test_curvenet.py +118 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_dcp.py +3 -5
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_deepgmr.py +3 -5
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_masknet.py +1 -3
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_masknet2.py +1 -3
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pcn.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pcrnet.py +1 -3
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pnlk.py +1 -3
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pointconv.py +1 -3
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pointnet.py +1 -3
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_prnet.py +3 -5
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_rpmnet.py +1 -3
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_PointNetLK.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_dcp.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_deepgmr.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_masknet.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_pcn.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_pcrnet.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_pointconv.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_pointnet.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_prnet.py +2 -4
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/train_rpmnet.py +2 -4
- {learning3d-0.1.0 → learning3d-0.2.1/learning3d/learning3d.egg-info}/PKG-INFO +57 -12
- learning3d-0.2.1/learning3d/learning3d.egg-info/SOURCES.txt +71 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/__init__.py +7 -1
- learning3d-0.2.1/learning3d/models/curvenet.py +130 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/dgcnn.py +1 -35
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/prnet.py +5 -39
- learning3d-0.2.1/learning3d/utils/__init__.py +23 -0
- learning3d-0.2.1/learning3d/utils/curvenet_util.py +540 -0
- learning3d-0.2.1/learning3d/utils/model_common_utils.py +156 -0
- {learning3d-0.1.0 → learning3d-0.2.1}/pyproject.toml +10 -3
- learning3d-0.1.0/src/learning3d.egg-info/requires.txt → learning3d-0.2.1/requirements.txt +1 -0
- learning3d-0.1.0/MANIFEST.in +0 -2
- learning3d-0.1.0/data/modelnet40_ply_hdf5_2048/shape_names.txt +0 -40
- learning3d-0.1.0/data/modelnet40_ply_hdf5_2048/test_files.txt +0 -2
- learning3d-0.1.0/data/modelnet40_ply_hdf5_2048/train_files.txt +0 -5
- learning3d-0.1.0/requirements.txt +0 -11
- learning3d-0.1.0/src/learning3d/losses/cuda/chamfer_distance/__init__.py +0 -1
- learning3d-0.1.0/src/learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +0 -185
- learning3d-0.1.0/src/learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +0 -209
- learning3d-0.1.0/src/learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +0 -66
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +0 -41
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +0 -347
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +0 -18
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/pkg/include/emd.h +0 -54
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +0 -1
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +0 -40
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +0 -70
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +0 -1
- learning3d-0.1.0/src/learning3d/losses/cuda/emd_torch/setup.py +0 -29
- learning3d-0.1.0/src/learning3d/ops/__init__.py +0 -0
- learning3d-0.1.0/src/learning3d/utils/__init__.py +0 -4
- learning3d-0.1.0/src/learning3d.egg-info/SOURCES.txt +0 -88
- learning3d-0.1.0/src/learning3d.egg-info/dependency_links.txt +0 -1
- learning3d-0.1.0/src/learning3d.egg-info/top_level.txt +0 -1
- {learning3d-0.1.0 → learning3d-0.2.1}/LICENSE +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/data_utils/__init__.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/data_utils/user_data.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/losses/__init__.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/losses/chamfer_distance.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/losses/classification.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/losses/correspondence_loss.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/losses/emd.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/losses/frobenius_norm.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/losses/rmse_features.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/classifier.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/dcp.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/deepgmr.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/masknet.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/masknet2.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/pcn.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/pcrnet.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/pointconv.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/pointnet.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/pointnetlk.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/pooling.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/ppfnet.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/rpmnet.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/models/segmentation.py +0 -0
- {learning3d-0.1.0/src/learning3d → learning3d-0.2.1/learning3d/ops}/__init__.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/ops/data_utils.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/ops/invmat.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/ops/quaternion.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/ops/se3.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/ops/sinc.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/ops/so3.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/ops/transform_functions.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/utils/pointconv_util.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/utils/ppfnet_util.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/utils/svd.py +0 -0
- {learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/utils/transformer.py +0 -0
- {learning3d-0.1.0 → learning3d-0.2.1}/setup.cfg +0 -0

{learning3d-0.1.0/src/learning3d.egg-info → learning3d-0.2.1}/PKG-INFO +57 -12

@@ -1,10 +1,35 @@
 Metadata-Version: 2.1
 Name: learning3d
-Version: 0.1
+Version: 0.2.1
 Summary: Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data
 Author-email: Vinit Sarode <vinitsarode5@gmail.com>
+Maintainer-email: Vinit Sarode <vinitsarode5@gmail.com>
+License: The MIT License
+
+Copyright (c) 2010-2019 Google, Inc. http://angularjs.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
 Project-URL: Homepage, https://github.com/vinits5/learning3d
+Project-URL: Repository, https://github.com/vinits5/learning3d
 Project-URL: Issues, https://github.com/vinits5/learning3d/issues
+Project-URL: Changelog, https://github.com/vinits5/learning3d/CHANGELOG.md
+Keywords: Point Clouds,Deep Learning,3D Vision,Point Cloud Registration,Point Cloud Classification,Point Cloud Segmentation
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
@@ -22,9 +47,10 @@ Requires-Dist: scikit-learn==1.2.2
 Requires-Dist: scipy==1.10.1
 Requires-Dist: numpy==1.24.3
 Requires-Dist: transforms3d==0.4.1
+Requires-Dist: pycuda
 
 <p align="center">
-    <img src="https://github.com/vinits5/learning3d/blob/
+    <img src="https://github.com/vinits5/learning3d/blob/pypi_v0.2.0/learning3d/images/logo.png" height="170">
 </p>
 
 # Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data.
@@ -34,24 +60,40 @@ Requires-Dist: transforms3d==0.4.1
 Learning3D is an open-source library that supports the development of deep learning algorithms that deal with 3D data. The Learning3D exposes a set of state of art deep neural networks in python. A modular code has been provided for further development. We welcome contributions from the open-source community.
 
 ## Latest News:
-1. \[
-2. \[
-3. \[24
-4. \[
-5. \[24 Dec. 2020\]: [
-6. \[
-7. \[
+1. \[28 Feb, 2025\]: [CurveNet](https://github.com/tiangexiang/CurveNet) is now a part of learning3d library.
+2. \[7 Apr, 2024\]: Now, learning3d is available as pypi package.
+3. \[24 Oct, 2023\]: [MaskNet++](https://github.com/zhouruqin/MaskNet2) is now a part of learning3d library.
+4. \[12 May, 2022\]: [ChamferDistance](https://github.com/fwilliams/fml) loss function is incorporated in learning3d. This is a purely pytorch based loss function.
+5. \[24 Dec. 2020\]: [MaskNet](https://arxiv.org/pdf/2010.09185.pdf) is now ready to enhance the performance of registration algorithms in learning3d for occluded point clouds.
+6. \[24 Dec. 2020\]: Loss based on the predicted and ground truth correspondences is added in learning3d after consideration of [Correspondence Matrices are Underrated](https://arxiv.org/pdf/2010.16085.pdf) paper.
+7. \[24 Dec. 2020\]: [PointConv](https://arxiv.org/abs/1811.07246), latent feature estimation using convolutions on point clouds is now available in learning3d.
+8. \[16 Oct. 2020\]: [DeepGMR](https://wentaoyuan.github.io/deepgmr/), registration using gaussian mixture models is now available in learning3d
+9. \[14 Oct. 2020\]: Now, use your own data in learning3d. (Check out [UserData](https://github.com/vinits5/learning3d#use-your-own-data) functionality!)
+
+## PyPI package setup
+### Setup from pypi server
+```
+pip install learning3d
+```
+
+### Setup using code
+```
+git clone https://github.com/vinits5/learning3d.git
+cd learning3d
+git checkout pypi_v0.1.0
+python3 -m pip install .
+```
 
 ## Available Computer Vision Algorithms in Learning3D
 
 | Sr. No. | Tasks | Algorithms |
 |:-------------:|:----------:|:-----|
-| 1 | [Classification](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN, PPFNet, [PointConv](https://github.com/vinits5/learning3d#use-of-pointconv) |
+| 1 | [Classification](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN, PPFNet, [PointConv](https://github.com/vinits5/learning3d#use-of-pointconv), [CurveNet](https://github.com/tiangexiang/CurveNet) |
 | 2 | [Segmentation](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN |
 | 3 | [Reconstruction](https://github.com/vinits5/learning3d#use-of-point-completion-network) | Point Completion Network (PCN) |
 | 4 | [Registration](https://github.com/vinits5/learning3d#use-of-registration-networks) | PointNetLK, PCRNet, DCP, PRNet, RPM-Net, DeepGMR |
 | 5 | [Flow Estimation](https://github.com/vinits5/learning3d#use-of-flow-estimation-network) | FlowNet3D |
-| 6 | [Inlier Estimation](https://github.com/vinits5/learning3d#use-of-inlier-estimation-network-masknet) | MaskNet, MaskNet++ |
+| 6 | [Inlier Estimation](https://github.com/vinits5/learning3d#use-of-inlier-estimation-network-masknet) | MaskNet, [MaskNet++](https://github.com/zhouruqin/MaskNet2) |
 
 ## Available Pretrained Models
 1. PointNet
@@ -66,6 +108,7 @@ Learning3D is an open-source library that supports the development of deep learn
 10. PointConv (Download from this [link](https://github.com/DylanWusee/pointconv_pytorch/blob/master/checkpoints/checkpoint.pth))
 11. MaskNet
 12. MaskNet++ / MaskNet2
+13. CurveNet
 
 ## Available Datasets
 1. ModelNet40
@@ -81,7 +124,8 @@ Learning3D is an open-source library that supports the development of deep learn
 1. Ubuntu 16.04
 2. Ubuntu 18.04
 3. Ubuntu 20.04.6
-
+4. Linux Mint
+5. macOS Sequoia 15.3.1
 
 ### Requirements
 1. CUDA 10.0 or higher
@@ -268,3 +312,4 @@ PointConv variable is a class. Users can use it to create a sub-class to overrid
 14. [CMU:](https://arxiv.org/pdf/2010.16085.pdf) Correspondence Matrices are Underrated
 15. [MaskNet:](https://arxiv.org/pdf/2010.09185.pdf) A Fully-Convolutional Network to Estimate Inlier Points
 16. [MaskNet++:](https://www.sciencedirect.com/science/article/abs/pii/S0097849322000085) Inlier/outlier identification for two point clouds
+17. [CurveNet:](https://github.com/tiangexiang/CurveNet) Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis

{learning3d-0.1.0 → learning3d-0.2.1}/README.md +31 -12

@@ -1,5 +1,5 @@
 <p align="center">
-    <img src="https://github.com/vinits5/learning3d/blob/
+    <img src="https://github.com/vinits5/learning3d/blob/pypi_v0.2.0/learning3d/images/logo.png" height="170">
 </p>
 
 # Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data.
@@ -9,24 +9,40 @@
 Learning3D is an open-source library that supports the development of deep learning algorithms that deal with 3D data. The Learning3D exposes a set of state of art deep neural networks in python. A modular code has been provided for further development. We welcome contributions from the open-source community.
 
 ## Latest News:
-1. \[
-2. \[
-3. \[24
-4. \[
-5. \[24 Dec. 2020\]: [
-6. \[
-7. \[
+1. \[28 Feb, 2025\]: [CurveNet](https://github.com/tiangexiang/CurveNet) is now a part of learning3d library.
+2. \[7 Apr, 2024\]: Now, learning3d is available as pypi package.
+3. \[24 Oct, 2023\]: [MaskNet++](https://github.com/zhouruqin/MaskNet2) is now a part of learning3d library.
+4. \[12 May, 2022\]: [ChamferDistance](https://github.com/fwilliams/fml) loss function is incorporated in learning3d. This is a purely pytorch based loss function.
+5. \[24 Dec. 2020\]: [MaskNet](https://arxiv.org/pdf/2010.09185.pdf) is now ready to enhance the performance of registration algorithms in learning3d for occluded point clouds.
+6. \[24 Dec. 2020\]: Loss based on the predicted and ground truth correspondences is added in learning3d after consideration of [Correspondence Matrices are Underrated](https://arxiv.org/pdf/2010.16085.pdf) paper.
+7. \[24 Dec. 2020\]: [PointConv](https://arxiv.org/abs/1811.07246), latent feature estimation using convolutions on point clouds is now available in learning3d.
+8. \[16 Oct. 2020\]: [DeepGMR](https://wentaoyuan.github.io/deepgmr/), registration using gaussian mixture models is now available in learning3d
+9. \[14 Oct. 2020\]: Now, use your own data in learning3d. (Check out [UserData](https://github.com/vinits5/learning3d#use-your-own-data) functionality!)
+
+## PyPI package setup
+### Setup from pypi server
+```
+pip install learning3d
+```
+
+### Setup using code
+```
+git clone https://github.com/vinits5/learning3d.git
+cd learning3d
+git checkout pypi_v0.1.0
+python3 -m pip install .
+```
 
 ## Available Computer Vision Algorithms in Learning3D
 
 | Sr. No. | Tasks | Algorithms |
 |:-------------:|:----------:|:-----|
-| 1 | [Classification](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN, PPFNet, [PointConv](https://github.com/vinits5/learning3d#use-of-pointconv) |
+| 1 | [Classification](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN, PPFNet, [PointConv](https://github.com/vinits5/learning3d#use-of-pointconv), [CurveNet](https://github.com/tiangexiang/CurveNet) |
 | 2 | [Segmentation](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN |
 | 3 | [Reconstruction](https://github.com/vinits5/learning3d#use-of-point-completion-network) | Point Completion Network (PCN) |
 | 4 | [Registration](https://github.com/vinits5/learning3d#use-of-registration-networks) | PointNetLK, PCRNet, DCP, PRNet, RPM-Net, DeepGMR |
 | 5 | [Flow Estimation](https://github.com/vinits5/learning3d#use-of-flow-estimation-network) | FlowNet3D |
-| 6 | [Inlier Estimation](https://github.com/vinits5/learning3d#use-of-inlier-estimation-network-masknet) | MaskNet, MaskNet++ |
+| 6 | [Inlier Estimation](https://github.com/vinits5/learning3d#use-of-inlier-estimation-network-masknet) | MaskNet, [MaskNet++](https://github.com/zhouruqin/MaskNet2) |
 
 ## Available Pretrained Models
 1. PointNet
@@ -41,6 +57,7 @@ Learning3D is an open-source library that supports the development of deep learn
 10. PointConv (Download from this [link](https://github.com/DylanWusee/pointconv_pytorch/blob/master/checkpoints/checkpoint.pth))
 11. MaskNet
 12. MaskNet++ / MaskNet2
+13. CurveNet
 
 ## Available Datasets
 1. ModelNet40
@@ -56,7 +73,8 @@ Learning3D is an open-source library that supports the development of deep learn
 1. Ubuntu 16.04
 2. Ubuntu 18.04
 3. Ubuntu 20.04.6
-
+4. Linux Mint
+5. macOS Sequoia 15.3.1
 
 ### Requirements
 1. CUDA 10.0 or higher
@@ -242,4 +260,5 @@ PointConv variable is a class. Users can use it to create a sub-class to overrid
 13. [DeepGMR:](https://arxiv.org/abs/2008.09088) Learning Latent Gaussian Mixture Models for Registration
 14. [CMU:](https://arxiv.org/pdf/2010.16085.pdf) Correspondence Matrices are Underrated
 15. [MaskNet:](https://arxiv.org/pdf/2010.09185.pdf) A Fully-Convolutional Network to Estimate Inlier Points
-16. [MaskNet++:](https://www.sciencedirect.com/science/article/abs/pii/S0097849322000085) Inlier/outlier identification for two point clouds
+16. [MaskNet++:](https://www.sciencedirect.com/science/article/abs/pii/S0097849322000085) Inlier/outlier identification for two point clouds
+17. [CurveNet:](https://github.com/tiangexiang/CurveNet) Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis
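
The README changes above fold CurveNet into the classification line-up. As a quick orientation for readers of this diff, here is a minimal, hypothetical inference sketch; it assumes the `from learning3d.models import CurveNet` import path and the `CurveNet(num_classes=..., k=...)` constructor used by the bundled `examples/test_curvenet.py`, and that the model accepts the (batch, points, 3) tensors those examples pass to it unchanged:

```
import torch
from learning3d.models import CurveNet  # exported by learning3d 0.2.1

# A random batch of 8 clouds with 1024 points each, the same (B, N, 3)
# layout that ClassificationData/ModelNet40Data yield in the examples.
points = torch.rand(8, 1024, 3)

model = CurveNet(num_classes=40, k=20)  # constructor arguments mirror test_curvenet.py
model.eval()
with torch.no_grad():
    logits = model(points)              # per-class scores, assumed shape (8, 40)
    predicted = logits.argmax(dim=1)    # predicted ModelNet40 class indices
print(predicted.shape)                  # torch.Size([8])
```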

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/data_utils/dataloaders.py +16 -14

@@ -16,8 +16,9 @@ from scipy.spatial.distance import minkowski
 from scipy.spatial import cKDTree
 from torch.utils.data import Dataset
 
-def download_modelnet40(
-
+def download_modelnet40():
+	BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+	DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
 	if not os.path.exists(DATA_DIR):
 		os.mkdir(DATA_DIR)
 	if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
@@ -27,10 +28,11 @@ def download_modelnet40(root_dir):
 		os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
 		os.system('rm %s' % (zipfile))
 
-def load_data(
+def load_data(train, use_normals):
 	if train: partition = 'train'
 	else: partition = 'test'
-
+	BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+	DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
 	all_data = []
 	all_label = []
 	for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5' % partition)):
@@ -184,15 +186,13 @@ class ModelNet40Data(Dataset):
 		self,
 		train=True,
 		num_points=1024,
-		download=
-		root_dir='./',
+		download=True,
 		randomize_data=False,
 		use_normals=False
 	):
 		super(ModelNet40Data, self).__init__()
-
-
-		self.data, self.labels = load_data(root_dir, train, use_normals)
+		if download: download_modelnet40()
+		self.data, self.labels = load_data(train, use_normals)
 		if not train: self.shapes = self.read_classes_ModelNet40()
 		self.num_points = num_points
 		self.randomize_data = randomize_data
@@ -218,7 +218,8 @@ class ModelNet40Data(Dataset):
 		return self.shapes[label]
 
 	def read_classes_ModelNet40(self):
-
+		BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+		DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
 		file = open(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'shape_names.txt'), 'r')
 		shape_names = file.read()
 		shape_names = np.array(shape_names.split('\n')[:-1])
@@ -226,7 +227,7 @@ class ModelNet40Data(Dataset):
 
 
 class ClassificationData(Dataset):
-	def __init__(self, data_class):
+	def __init__(self, data_class=ModelNet40Data()):
 		super(ClassificationData, self).__init__()
 		self.set_class(data_class)
 
@@ -247,7 +248,7 @@ class ClassificationData(Dataset):
 
 
 class RegistrationData(Dataset):
-	def __init__(self, algorithm, data_class, partial_source=False, partial_template=False, noise=False, additional_params={}):
+	def __init__(self, algorithm, data_class=ModelNet40Data(), partial_source=False, partial_template=False, noise=False, additional_params={}):
 		super(RegistrationData, self).__init__()
 		available_algorithms = ['PCRNet', 'PointNetLK', 'DCP', 'PRNet', 'iPCRNet', 'RPMNet', 'DeepGMR']
 		if algorithm in available_algorithms: self.algorithm = algorithm
@@ -361,9 +362,10 @@ class FlowData(Dataset):
 
 
 class SceneflowDataset(Dataset):
-	def __init__(self,
+	def __init__(self, npoints=1024, root='', partition='train'):
 		if root == '':
-
+			BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+			DATA_DIR = os.path.join(BASE_DIR, os.pardir, 'data')
 			root = os.path.join(DATA_DIR, 'data_processed_maxcut_35_20k_2k_8192')
 		if not os.path.exists(root):
 			print("To download dataset, click here: https://drive.google.com/file/d/1CMaxdt-Tg1Wct8v8eGNwuT7qRSIyJPY-/view")
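
The dataloader rework above removes the `root_dir` plumbing: `download_modelnet40()` and `load_data()` now resolve the data directory relative to the installed package, `download=True` becomes the default, and `ClassificationData` and `RegistrationData` default their `data_class` to `ModelNet40Data()`. A short sketch of the resulting call pattern, mirroring the updated example scripts (the import path is the one used in `examples/test_curvenet.py`; `RegistrationData` is assumed to be exported from the same `data_utils` package):

```
from torch.utils.data import DataLoader
from learning3d.data_utils import ClassificationData, ModelNet40Data, RegistrationData

# No root_dir argument any more: ModelNet40 is fetched into the package's
# data/ folder on first use because download=True is now the default.
testset = ClassificationData(ModelNet40Data(train=False, num_points=1024))
test_loader = DataLoader(testset, batch_size=32, shuffle=False, num_workers=4)

# Registration datasets follow the same pattern, as in the updated test scripts.
regset = RegistrationData('PCRNet', ModelNet40Data(train=False))
```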

learning3d-0.2.1/learning3d/examples/test_curvenet.py +118 -0 (new file)

@@ -0,0 +1,118 @@
+import open3d as o3d
+import argparse
+import os
+import sys
+import logging
+import numpy
+import numpy as np
+import torch
+import torch.utils.data
+import torchvision
+from torch.utils.data import DataLoader
+from tensorboardX import SummaryWriter
+from tqdm import tqdm
+
+# Only if the files are in example folder.
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+if BASE_DIR[-8:] == 'examples':
+	sys.path.append(os.path.join(BASE_DIR, os.pardir))
+	os.chdir(os.path.join(BASE_DIR, os.pardir))
+
+from learning3d.models import CurveNet
+from learning3d.data_utils import ClassificationData, ModelNet40Data
+
+def display_open3d(template):
+	template_ = o3d.geometry.PointCloud()
+	template_.points = o3d.utility.Vector3dVector(template)
+	# template_.paint_uniform_color([1, 0, 0])
+	o3d.visualization.draw_geometries([template_])
+
+def test_one_epoch(device, model, test_loader, testset):
+	model.eval()
+	test_loss = 0.0
+	pred = 0.0
+	count = 0
+	for i, data in enumerate(tqdm(test_loader)):
+		points, target = data
+		target = target[:,0]
+
+		points = points.to(device)
+		target = target.to(device)
+
+		output = model(points)
+		loss_val = torch.nn.functional.nll_loss(
+			torch.nn.functional.log_softmax(output, dim=1), target, size_average=False)
+		print("Ground Truth Label: ", testset.get_shape(target[0].item()))
+		print("Predicted Label: ", testset.get_shape(torch.argmax(output[0]).item()))
+		display_open3d(points.detach().cpu().numpy()[0])
+
+		test_loss += loss_val.item()
+		count += output.size(0)
+
+		_, pred1 = output.max(dim=1)
+		ag = (pred1 == target)
+		am = ag.sum()
+		pred += am.item()
+
+	test_loss = float(test_loss)/count
+	accuracy = float(pred)/count
+	return test_loss, accuracy
+
+def test(args, model, test_loader, testset):
+	test_loss, test_accuracy = test_one_epoch(args.device, model, test_loader, testset)
+	print("Accuracy: ", test_accuracy*100)
+
+def options():
+	parser = argparse.ArgumentParser(description='Point Cloud Registration')
+	parser.add_argument('--dataset_path', type=str, default='ModelNet40',
+		metavar='PATH', help='path to the input dataset') # like '/path/to/ModelNet40'
+	parser.add_argument('--eval', type=bool, default=False, help='Train or Evaluate the network.')
+
+	# settings for input data
+	parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
+		metavar='DATASET', help='dataset type (default: modelnet)')
+	parser.add_argument('--num_points', default=1024, type=int,
+		metavar='N', help='points in point-cloud (default: 1024)')
+
+	# settings for CurveNet
+	parser.add_argument('-j', '--workers', default=4, type=int,
+		metavar='N', help='number of data loading workers (default: 4)')
+	parser.add_argument('-b', '--batch_size', default=32, type=int,
+		metavar='N', help='mini-batch size (default: 32)')
+	parser.add_argument('--num_classes', default=40, type=int,
+		metavar='K', help='number of classes to be predicted')
+
+	# settings for on training
+	parser.add_argument('--pretrained', default='learning3d/pretrained/exp_curvenet/models/model.t7', type=str,
+		metavar='PATH', help='path to pretrained model file (default: null (no-use))')
+	parser.add_argument('--device', default='cuda:0', type=str,
+		metavar='DEVICE', help='use CUDA if available')
+
+	args = parser.parse_args()
+	return args
+
+def main():
+	args = options()
+	args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
+
+	testset = ClassificationData(ModelNet40Data(train=False))
+	test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
+
+	if not torch.cuda.is_available():
+		args.device = 'cpu'
+	args.device = torch.device(args.device)
+
+	# Create PointNet Model.
+	model = CurveNet(num_classes=args.num_classes, k=20)
+
+	if args.pretrained:
+		assert os.path.isfile(args.pretrained)
+		weights = torch.load(args.pretrained, map_location='cpu')
+		weights = {k[7:]: v for k, v in weights.items()}
+		model.load_state_dict(weights)
+	model.to(args.device)
+
+	test(args, model, test_loader, testset)
+
+if __name__ == '__main__':
+	main()
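
One detail of the new test script worth noting: the pretrained checkpoint is loaded with `map_location='cpu'` and the state-dict keys are sliced with `k[7:]`, which drops a seven-character prefix, presumably the `module.` prefix that `nn.DataParallel` prepends when a model is trained on multiple GPUs. A hedged, stand-alone sketch of the same idea written defensively (the helper name is illustrative, not part of the package):

```
import torch

def load_checkpoint(model, path):
    """Load weights that may have been saved from an nn.DataParallel-wrapped model."""
    state = torch.load(path, map_location='cpu')  # do not assume a GPU is present
    # Strip the 'module.' prefix only when it is actually there.
    state = {k[len('module.'):] if k.startswith('module.') else k: v
             for k, v in state.items()}
    model.load_state_dict(state)
    return model

# e.g. load_checkpoint(CurveNet(num_classes=40, k=20),
#                      'learning3d/pretrained/exp_curvenet/models/model.t7')
```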

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_dcp.py +3 -5

@@ -88,8 +88,6 @@ def options():
 		metavar='DATASET', help='dataset type (default: modelnet)')
 	parser.add_argument('--num_points', default=1024, type=int,
 		metavar='N', help='points in point-cloud (default: 1024)')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for PointNet
 	parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -116,8 +114,8 @@ def main():
 	args = options()
 	torch.backends.cudnn.deterministic = True
 
-	trainset = RegistrationData('DCP', ModelNet40Data(train=True
-	testset = RegistrationData('DCP', ModelNet40Data(train=False
+	trainset = RegistrationData('DCP', ModelNet40Data(train=True))
+	testset = RegistrationData('DCP', ModelNet40Data(train=False))
 	train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
 	test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
 
@@ -132,7 +130,7 @@ def main():
 
 	if args.pretrained:
 		assert os.path.isfile(args.pretrained)
-		model.load_state_dict(torch.load(args.pretrained), strict=False)
+		model.load_state_dict(torch.load(args.pretrained, map_location='cpu'), strict=False)
 	model.to(args.device)
 
 	test(args, model, test_loader)

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_deepgmr.py +3 -5

@@ -103,8 +103,6 @@ def options():
 		metavar='K', help='No of nearest neighbors to be estimated.')
 	parser.add_argument('--use_rri', default=True, type=bool,
 		help='Find nearest neighbors to estimate features from PointNet.')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for on training
 	parser.add_argument('-j', '--workers', default=4, type=int,
@@ -123,8 +121,8 @@ def main():
 	args = options()
 	torch.backends.cudnn.deterministic = True
 
-	trainset = RegistrationData('DeepGMR', ModelNet40Data(train=True
-	testset = RegistrationData('DeepGMR', ModelNet40Data(train=False
+	trainset = RegistrationData('DeepGMR', ModelNet40Data(train=True))
+	testset = RegistrationData('DeepGMR', ModelNet40Data(train=False))
 	train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
 	test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
 
@@ -137,7 +135,7 @@ def main():
 
 	if args.pretrained:
 		assert os.path.isfile(args.pretrained)
-		model.load_state_dict(torch.load(args.pretrained), strict=False)
+		model.load_state_dict(torch.load(args.pretrained, map_location='cpu'), strict=False)
 	model.to(args.device)
 
 	test(args, model, test_loader)

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_masknet.py +1 -3

@@ -117,8 +117,6 @@ def options():
 		help='Add noise in source point clouds.')
 	parser.add_argument('--outliers', default=False, type=bool,
 		help='Add outliers to template point cloud.')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for on testing
 	parser.add_argument('-j', '--workers', default=1, type=int,
@@ -139,7 +137,7 @@ def main():
 	args = options()
 	torch.backends.cudnn.deterministic = True
 
-	testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points
+	testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points),
 		partial_source=args.partial_source, noise=args.noise,
 		additional_params={'use_masknet': True})
 	test_loader = DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, drop_last=False, num_workers=args.workers)

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_masknet2.py +1 -3

@@ -120,8 +120,6 @@ def options():
 		help='Add noise in source point clouds.')
 	parser.add_argument('--outliers', default=False, type=bool,
 		help='Add outliers to template point cloud.')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for on testing
 	parser.add_argument('-j', '--workers', default=1, type=int,
@@ -142,7 +140,7 @@ def main():
 	args = options()
 	torch.backends.cudnn.deterministic = True
 
-	testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points
+	testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points),
 		partial_template=args.partial_template, partial_source=args.partial_source,
 		noise=args.noise, additional_params={'use_masknet': True, 'partial_point_cloud_method': 'planar_crop'})
 	test_loader = DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, drop_last=False, num_workers=args.workers)

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pcn.py +2 -4

@@ -70,8 +70,6 @@ def options():
 		metavar='DATASET', help='dataset type (default: modelnet)')
 	parser.add_argument('--num_points', default=1024, type=int,
 		metavar='N', help='points in point-cloud (default: 1024)')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for PCN
 	parser.add_argument('--emb_dims', default=1024, type=int,
@@ -97,8 +95,8 @@ def main():
 	args = options()
 	args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
 
-	trainset = ClassificationData(ModelNet40Data(train=True
-	testset = ClassificationData(ModelNet40Data(train=False
+	trainset = ClassificationData(ModelNet40Data(train=True))
+	testset = ClassificationData(ModelNet40Data(train=False))
 	train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
 	test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
 

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pcrnet.py +1 -3

@@ -74,8 +74,6 @@ def options():
 		metavar='DATASET', help='dataset type (default: modelnet)')
 	parser.add_argument('--num_points', default=1024, type=int,
 		metavar='N', help='points in point-cloud (default: 1024)')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for PointNet
 	parser.add_argument('--emb_dims', default=1024, type=int,
@@ -99,7 +97,7 @@ def options():
 def main():
 	args = options()
 
-	testset = RegistrationData('PCRNet', ModelNet40Data(train=False
+	testset = RegistrationData('PCRNet', ModelNet40Data(train=False))
 	test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
 
 	if not torch.cuda.is_available():

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pnlk.py +1 -3

@@ -74,8 +74,6 @@ def options():
 		metavar='DATASET', help='dataset type (default: modelnet)')
 	parser.add_argument('--num_points', default=1024, type=int,
 		metavar='N', help='points in point-cloud (default: 1024)')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for PointNet
 	parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -100,7 +98,7 @@ def options():
 def main():
 	args = options()
 
-	testset = RegistrationData('PointNetLK', ModelNet40Data(train=False
+	testset = RegistrationData('PointNetLK', ModelNet40Data(train=False))
 	test_loader = DataLoader(testset, batch_size=8, shuffle=False, drop_last=False, num_workers=args.workers)
 
 	if not torch.cuda.is_available():

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pointconv.py +1 -3

@@ -73,8 +73,6 @@ def options():
 		metavar='DATASET', help='dataset type (default: modelnet)')
 	parser.add_argument('--num_points', default=1024, type=int,
 		metavar='N', help='points in point-cloud (default: 1024)')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for PointNet
 	parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -101,7 +99,7 @@ def main():
 	args = options()
 	args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
 
-	testset = ClassificationData(ModelNet40Data(train=False
+	testset = ClassificationData(ModelNet40Data(train=False))
 	test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
 
 	if not torch.cuda.is_available():

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_pointnet.py +1 -3

@@ -73,8 +73,6 @@ def options():
 		metavar='DATASET', help='dataset type (default: modelnet)')
 	parser.add_argument('--num_points', default=1024, type=int,
 		metavar='N', help='points in point-cloud (default: 1024)')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for PointNet
 	parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -101,7 +99,7 @@ def main():
 	args = options()
 	args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
 
-	testset = ClassificationData(ModelNet40Data(train=False
+	testset = ClassificationData(ModelNet40Data(train=False))
 	test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
 
 	if not torch.cuda.is_available():

{learning3d-0.1.0/src → learning3d-0.2.1}/learning3d/examples/test_prnet.py +3 -5

@@ -79,8 +79,6 @@ def options():
 	# settings for input data
 	parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
 		metavar='DATASET', help='dataset type (default: modelnet)')
-	parser.add_argument('--root_dir', default='./', type=str,
-		help='path of the data where modelnet files are downloaded.')
 
 	# settings for PointNet
 	parser.add_argument('--emb_dims', default=512, type=int,
@@ -104,8 +102,8 @@ def main():
 	args = options()
 	torch.backends.cudnn.deterministic = True
 
-	trainset = RegistrationData('PRNet', ModelNet40Data(train=True
-	testset = RegistrationData('PRNet', ModelNet40Data(train=False
+	trainset = RegistrationData('PRNet', ModelNet40Data(train=True), partial_source=True, partial_template=True)
+	testset = RegistrationData('PRNet', ModelNet40Data(train=False), partial_source=True, partial_template=True)
 	train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
 	test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
 
@@ -119,7 +117,7 @@ def main():
 
 	if args.pretrained:
 		assert os.path.isfile(args.pretrained)
-		model.load_state_dict(torch.load(args.pretrained), strict=False)
+		model.load_state_dict(torch.load(args.pretrained, map_location='cpu'), strict=False)
 	model.to(args.device)
 
 	test(args, model, test_loader)