learning3d 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. {learning3d/data_utils → data_utils}/dataloaders.py +16 -14
  2. examples/test_curvenet.py +118 -0
  3. {learning3d/examples → examples}/test_dcp.py +3 -5
  4. {learning3d/examples → examples}/test_deepgmr.py +3 -5
  5. {learning3d/examples → examples}/test_masknet.py +1 -3
  6. {learning3d/examples → examples}/test_masknet2.py +1 -3
  7. {learning3d/examples → examples}/test_pcn.py +2 -4
  8. {learning3d/examples → examples}/test_pcrnet.py +1 -3
  9. {learning3d/examples → examples}/test_pnlk.py +1 -3
  10. {learning3d/examples → examples}/test_pointconv.py +1 -3
  11. {learning3d/examples → examples}/test_pointnet.py +1 -3
  12. {learning3d/examples → examples}/test_prnet.py +3 -5
  13. {learning3d/examples → examples}/test_rpmnet.py +1 -3
  14. {learning3d/examples → examples}/train_PointNetLK.py +2 -4
  15. {learning3d/examples → examples}/train_dcp.py +2 -4
  16. {learning3d/examples → examples}/train_deepgmr.py +2 -4
  17. {learning3d/examples → examples}/train_masknet.py +2 -4
  18. {learning3d/examples → examples}/train_pcn.py +2 -4
  19. {learning3d/examples → examples}/train_pcrnet.py +2 -4
  20. {learning3d/examples → examples}/train_pointconv.py +2 -4
  21. {learning3d/examples → examples}/train_pointnet.py +2 -4
  22. {learning3d/examples → examples}/train_prnet.py +2 -4
  23. {learning3d/examples → examples}/train_rpmnet.py +2 -4
  24. {learning3d-0.1.0.dist-info → learning3d-0.2.0.dist-info}/METADATA +56 -11
  25. learning3d-0.2.0.dist-info/RECORD +70 -0
  26. {learning3d-0.1.0.dist-info → learning3d-0.2.0.dist-info}/WHEEL +1 -1
  27. learning3d-0.2.0.dist-info/top_level.txt +6 -0
  28. {learning3d/models → models}/__init__.py +7 -1
  29. models/curvenet.py +130 -0
  30. {learning3d/models → models}/dgcnn.py +1 -35
  31. {learning3d/models → models}/prnet.py +5 -39
  32. utils/__init__.py +23 -0
  33. utils/curvenet_util.py +540 -0
  34. utils/model_common_utils.py +156 -0
  35. learning3d/losses/cuda/chamfer_distance/__init__.py +0 -1
  36. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +0 -185
  37. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +0 -209
  38. learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +0 -66
  39. learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +0 -41
  40. learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +0 -347
  41. learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +0 -18
  42. learning3d/losses/cuda/emd_torch/pkg/include/emd.h +0 -54
  43. learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +0 -1
  44. learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +0 -40
  45. learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +0 -70
  46. learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +0 -1
  47. learning3d/losses/cuda/emd_torch/setup.py +0 -29
  48. learning3d/ops/__init__.py +0 -0
  49. learning3d/utils/__init__.py +0 -4
  50. learning3d-0.1.0.dist-info/RECORD +0 -80
  51. learning3d-0.1.0.dist-info/top_level.txt +0 -1
  52. {learning3d/data_utils → data_utils}/__init__.py +0 -0
  53. {learning3d/data_utils → data_utils}/user_data.py +0 -0
  54. {learning3d-0.1.0.dist-info → learning3d-0.2.0.dist-info}/LICENSE +0 -0
  55. {learning3d/losses → losses}/__init__.py +0 -0
  56. {learning3d/losses → losses}/chamfer_distance.py +0 -0
  57. {learning3d/losses → losses}/classification.py +0 -0
  58. {learning3d/losses → losses}/correspondence_loss.py +0 -0
  59. {learning3d/losses → losses}/emd.py +0 -0
  60. {learning3d/losses → losses}/frobenius_norm.py +0 -0
  61. {learning3d/losses → losses}/rmse_features.py +0 -0
  62. {learning3d/models → models}/classifier.py +0 -0
  63. {learning3d/models → models}/dcp.py +0 -0
  64. {learning3d/models → models}/deepgmr.py +0 -0
  65. {learning3d/models → models}/masknet.py +0 -0
  66. {learning3d/models → models}/masknet2.py +0 -0
  67. {learning3d/models → models}/pcn.py +0 -0
  68. {learning3d/models → models}/pcrnet.py +0 -0
  69. {learning3d/models → models}/pointconv.py +0 -0
  70. {learning3d/models → models}/pointnet.py +0 -0
  71. {learning3d/models → models}/pointnetlk.py +0 -0
  72. {learning3d/models → models}/pooling.py +0 -0
  73. {learning3d/models → models}/ppfnet.py +0 -0
  74. {learning3d/models → models}/rpmnet.py +0 -0
  75. {learning3d/models → models}/segmentation.py +0 -0
  76. {learning3d → ops}/__init__.py +0 -0
  77. {learning3d/ops → ops}/data_utils.py +0 -0
  78. {learning3d/ops → ops}/invmat.py +0 -0
  79. {learning3d/ops → ops}/quaternion.py +0 -0
  80. {learning3d/ops → ops}/se3.py +0 -0
  81. {learning3d/ops → ops}/sinc.py +0 -0
  82. {learning3d/ops → ops}/so3.py +0 -0
  83. {learning3d/ops → ops}/transform_functions.py +0 -0
  84. {learning3d/utils → utils}/pointconv_util.py +0 -0
  85. {learning3d/utils → utils}/ppfnet_util.py +0 -0
  86. {learning3d/utils → utils}/svd.py +0 -0
  87. {learning3d/utils → utils}/transformer.py +0 -0
examples/train_rpmnet.py
@@ -146,8 +146,6 @@ def options():
  metavar='DATASET', help='dataset type (default: modelnet)')
  parser.add_argument('--num_points', default=1024, type=int,
  metavar='N', help='points in point-cloud (default: 1024)')
- parser.add_argument('--root_dir', default='./', type=str,
- help='path of the data where modelnet files are downloaded.')

  # settings for PointNet
  parser.add_argument('--fine_tune_pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -196,8 +194,8 @@ def main():
  textio.cprint(str(args))


- trainset = RegistrationData('RPMNet', ModelNet40Data(train=True, num_points=args.num_points, use_normals=True, root_dir=args.root_dir), partial_source=True, partial_template=True)
- testset = RegistrationData('RPMNet', ModelNet40Data(train=False, num_points=args.num_points, use_normals=True, root_dir=args.root_dir), partial_source=True, partial_template=True)
+ trainset = RegistrationData('RPMNet', ModelNet40Data(train=True, num_points=args.num_points, use_normals=True), partial_source=True, partial_template=True)
+ testset = RegistrationData('RPMNet', ModelNet40Data(train=False, num_points=args.num_points, use_normals=True), partial_source=True, partial_template=True)
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
  test_loader = DataLoader(testset, batch_size=8, shuffle=False, drop_last=False, num_workers=args.workers)

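For reference, a minimal sketch of how the data pipeline is built after this change (a hypothetical standalone snippet, not taken from the package): `ModelNet40Data` is now called without `root_dir`, so the dataset manages its own download location. The import path assumes the 0.2.0 wheel's top-level `data_utils` package; in the source repo the same classes live under `learning3d.data_utils`.

```python
# Hedged usage sketch based on the hunk above; constructing ModelNet40Data
# downloads ModelNet40 on first use now that --root_dir is gone.
from torch.utils.data import DataLoader

from data_utils import ModelNet40Data, RegistrationData  # assumed 0.2.0 wheel layout

trainset = RegistrationData(
    'RPMNet',
    ModelNet40Data(train=True, num_points=1024, use_normals=True),
    partial_source=True, partial_template=True)

train_loader = DataLoader(trainset, batch_size=8, shuffle=True,
                          drop_last=True, num_workers=2)

batch = next(iter(train_loader))
print(len(batch), [tuple(t.shape) for t in batch if hasattr(t, 'shape')])
```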
learning3d-0.2.0.dist-info/METADATA
@@ -1,10 +1,35 @@
  Metadata-Version: 2.1
  Name: learning3d
- Version: 0.1.0
+ Version: 0.2.0
  Summary: Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data
  Author-email: Vinit Sarode <vinitsarode5@gmail.com>
+ Maintainer-email: Vinit Sarode <vinitsarode5@gmail.com>
+ License: The MIT License
+
+ Copyright (c) 2010-2019 Google, Inc. http://angularjs.org
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
  Project-URL: Homepage, https://github.com/vinits5/learning3d
+ Project-URL: Repository, https://github.com/vinits5/learning3d
  Project-URL: Issues, https://github.com/vinits5/learning3d/issues
+ Project-URL: Changelog, https://github.com/vinits5/learning3d/CHANGELOG.md
+ Keywords: Point Clouds,Deep Learning,3D Vision,Point Cloud Registration,Point Cloud Classification,Point Cloud Segmentation
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
@@ -22,6 +47,7 @@ Requires-Dist: scikit-learn ==1.2.2
  Requires-Dist: scipy ==1.10.1
  Requires-Dist: numpy ==1.24.3
  Requires-Dist: transforms3d ==0.4.1
+ Requires-Dist: pycuda

  <p align="center">
  <img src="https://github.com/vinits5/learning3d/blob/master/images/logo.png" height="170">
@@ -34,24 +60,40 @@ Requires-Dist: transforms3d ==0.4.1
  Learning3D is an open-source library that supports the development of deep learning algorithms that deal with 3D data. The Learning3D exposes a set of state of art deep neural networks in python. A modular code has been provided for further development. We welcome contributions from the open-source community.

  ## Latest News:
- 1. \[24 Oct, 2023\]: [MaskNet++](https://github.com/zhouruqin/MaskNet2) is now a part of learning3d library.
- 2. \[12 May, 2022\]: [ChamferDistance](https://github.com/fwilliams/fml) loss function is incorporated in learning3d. This is a purely pytorch based loss function.
- 3. \[24 Dec. 2020\]: [MaskNet](https://arxiv.org/pdf/2010.09185.pdf) is now ready to enhance the performance of registration algorithms in learning3d for occluded point clouds.
- 4. \[24 Dec. 2020\]: Loss based on the predicted and ground truth correspondences is added in learning3d after consideration of [Correspondence Matrices are Underrated](https://arxiv.org/pdf/2010.16085.pdf) paper.
- 5. \[24 Dec. 2020\]: [PointConv](https://arxiv.org/abs/1811.07246), latent feature estimation using convolutions on point clouds is now available in learning3d.
- 6. \[16 Oct. 2020\]: [DeepGMR](https://wentaoyuan.github.io/deepgmr/), registration using gaussian mixture models is now available in learning3d
- 7. \[14 Oct. 2020\]: Now, use your own data in learning3d. (Check out [UserData](https://github.com/vinits5/learning3d#use-your-own-data) functionality!)
+ 1. \[28 Feb, 2025\]: [CurveNet](https://github.com/tiangexiang/CurveNet) is now a part of learning3d library.
+ 2. \[7 Apr, 2024\]: Now, learning3d is available as pypi package.
+ 3. \[24 Oct, 2023\]: [MaskNet++](https://github.com/zhouruqin/MaskNet2) is now a part of learning3d library.
+ 4. \[12 May, 2022\]: [ChamferDistance](https://github.com/fwilliams/fml) loss function is incorporated in learning3d. This is a purely pytorch based loss function.
+ 5. \[24 Dec. 2020\]: [MaskNet](https://arxiv.org/pdf/2010.09185.pdf) is now ready to enhance the performance of registration algorithms in learning3d for occluded point clouds.
+ 6. \[24 Dec. 2020\]: Loss based on the predicted and ground truth correspondences is added in learning3d after consideration of [Correspondence Matrices are Underrated](https://arxiv.org/pdf/2010.16085.pdf) paper.
+ 7. \[24 Dec. 2020\]: [PointConv](https://arxiv.org/abs/1811.07246), latent feature estimation using convolutions on point clouds is now available in learning3d.
+ 8. \[16 Oct. 2020\]: [DeepGMR](https://wentaoyuan.github.io/deepgmr/), registration using gaussian mixture models is now available in learning3d
+ 9. \[14 Oct. 2020\]: Now, use your own data in learning3d. (Check out [UserData](https://github.com/vinits5/learning3d#use-your-own-data) functionality!)
+
+ ## PyPI package setup
+ ### Setup from pypi server
+ ```
+ pip install learning3d
+ ```
+
+ ### Setup using code
+ ```
+ git clone https://github.com/vinits5/learning3d.git
+ cd learning3d
+ git checkout pypi_v0.1.0
+ python3 -m pip install .
+ ```

  ## Available Computer Vision Algorithms in Learning3D

  | Sr. No. | Tasks | Algorithms |
  |:-------------:|:----------:|:-----|
- | 1 | [Classification](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN, PPFNet, [PointConv](https://github.com/vinits5/learning3d#use-of-pointconv) |
+ | 1 | [Classification](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN, PPFNet, [PointConv](https://github.com/vinits5/learning3d#use-of-pointconv), [CurveNet](https://github.com/tiangexiang/CurveNet) |
  | 2 | [Segmentation](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN |
  | 3 | [Reconstruction](https://github.com/vinits5/learning3d#use-of-point-completion-network) | Point Completion Network (PCN) |
  | 4 | [Registration](https://github.com/vinits5/learning3d#use-of-registration-networks) | PointNetLK, PCRNet, DCP, PRNet, RPM-Net, DeepGMR |
  | 5 | [Flow Estimation](https://github.com/vinits5/learning3d#use-of-flow-estimation-network) | FlowNet3D |
- | 6 | [Inlier Estimation](https://github.com/vinits5/learning3d#use-of-inlier-estimation-network-masknet) | MaskNet, MaskNet++ |
+ | 6 | [Inlier Estimation](https://github.com/vinits5/learning3d#use-of-inlier-estimation-network-masknet) | MaskNet, [MaskNet++](https://github.com/zhouruqin/MaskNet2) |

  ## Available Pretrained Models
  1. PointNet
@@ -66,6 +108,7 @@ Learning3D is an open-source library that supports the development of deep learn
  10. PointConv (Download from this [link](https://github.com/DylanWusee/pointconv_pytorch/blob/master/checkpoints/checkpoint.pth))
  11. MaskNet
  12. MaskNet++ / MaskNet2
+ 13. CurveNet

  ## Available Datasets
  1. ModelNet40
@@ -81,7 +124,8 @@ Learning3D is an open-source library that supports the development of deep learn
  1. Ubuntu 16.04
  2. Ubuntu 18.04
  3. Ubuntu 20.04.6
- 3. Linux Mint
+ 4. Linux Mint
+ 5. macOS Sequoia 15.3.1

  ### Requirements
  1. CUDA 10.0 or higher
@@ -268,3 +312,4 @@ PointConv variable is a class. Users can use it to create a sub-class to overrid
  14. [CMU:](https://arxiv.org/pdf/2010.16085.pdf) Correspondence Matrices are Underrated
  15. [MaskNet:](https://arxiv.org/pdf/2010.09185.pdf) A Fully-Convolutional Network to Estimate Inlier Points
  16. [MaskNet++:](https://www.sciencedirect.com/science/article/abs/pii/S0097849322000085) Inlier/outlier identification for two point clouds
+ 17. [CurveNet:](https://github.com/tiangexiang/CurveNet) Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis
learning3d-0.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,70 @@
+ data_utils/__init__.py,sha256=iYAVh0FThnVlG42QIgmDYrC3NGVYuzKX8s1oRqAI1YU,261
+ data_utils/dataloaders.py,sha256=kb0wsLlMN7sB-CS_4BGSprSaZBwkUNYxS5iwUdD6JJM,14871
+ data_utils/user_data.py,sha256=ADDGeCUCr6TcXhcxvAFncIeLO71xoRHYi4H418ktvQs,4828
+ examples/test_curvenet.py,sha256=Ly-Pp7lhS9kWgyOeCRqlfLrLFOerH76jxb4BWtdPGOA,4085
+ examples/test_dcp.py,sha256=-qFgrg5uXdaMR3JF4zxNrmAqZspIgzdKkFFaJPmp-4Y,5626
+ examples/test_deepgmr.py,sha256=ux5BjmhtD2kut4zbSkhi6cIMH7BySuVCRKGHqRusEYw,5617
+ examples/test_masknet.py,sha256=dkqUui9sv2SzHtvtzUzL_PxJxMcBMqCSDPAYg0BWAVU,6405
+ examples/test_masknet2.py,sha256=3_XWBQOwQjK3BCQ__BmPhCvYI_0hZMK3X4C-P2Krw6w,6859
+ examples/test_pcn.py,sha256=4eaosjJVqiFxlqnaUWu-O2Jawt4uU16UJEzitIjP2us,4342
+ examples/test_pcrnet.py,sha256=_x9l55sMBACXUfQHLaH7GJMfz6PWdYWSxjrRxVoY-As,4366
+ examples/test_pnlk.py,sha256=9u7B--PuCpl6TAmObmIKfDvYW6bMj3Jcc3_djTDO-D4,4456
+ examples/test_pointconv.py,sha256=NUcLjkkNJsGZYaUZHug6QGybm8NshZfg2tc8rfksNU8,4673
+ examples/test_pointnet.py,sha256=VKfB5DE8fh2G1iIoY02GvjSgWUJk2jQBmmJs3HC5rVU,4324
+ examples/test_prnet.py,sha256=S8Q7to5NH4Mz39UUzXPmYxaMgiaFZIJBuwlYR2fZByQ,4921
+ examples/test_rpmnet.py,sha256=oy-z7I26IQxr4TD_p0qCRnOn6H8VbQFyiWO83ZSFcDk,4476
+ examples/train_PointNetLK.py,sha256=0GgT2NYKNZl8o02rvW-nYBO_1tlfDNuakuAXtm1V16c,8773
+ examples/train_dcp.py,sha256=SQVrwnZqGmFCZv_X2tzMysBmv-HI9sllZMWw5zsW3NM,9511
+ examples/train_deepgmr.py,sha256=vxdkgfQZPtwuYryR0chegTiLuXOQag8r_ccGJ6qtw7o,9397
+ examples/train_masknet.py,sha256=XzgWsmVAm5Lk21mH9qhvNN0um4pI1fYVfsBAV4deSOM,8889
+ examples/train_pcn.py,sha256=X7MSYVXwgIMExplua1M9pG20eNhZ_0p83yTADSYrAlA,7542
+ examples/train_pcrnet.py,sha256=KQ8MiDUiR46qS9t7tc5POJ3NjMyZFBEPOVQY-7Vszpk,8198
+ examples/train_pointconv.py,sha256=noGT2yGWHAuecObz1X9cEiWl0xjh7NhmRneP88jR8uI,8939
+ examples/train_pointnet.py,sha256=SXheDRP_GHZQQw4BEYS7bfL481D8GcbTekON-GVwOsk,8840
+ examples/train_prnet.py,sha256=2zvd-3cYzZP8L92XJmFL5rTzxpApUhetiEc4u4V0X5g,8373
+ examples/train_rpmnet.py,sha256=PEdFgPXyeME0axvLEd--VbpbqWV6P5i6NnjQnJ_X3Oo,8530
+ losses/__init__.py,sha256=zjjZeA_NvAhZlxiYBbtgjEsvMyLFhFWXlZioitrlGWw,425
+ losses/chamfer_distance.py,sha256=UTZ6x5cGwL3L5hJZOWoC35gTzcKh1S0yCg8vGuGXU1w,2121
+ losses/classification.py,sha256=QgDHC5VgSga6BiuD4Ee70t6vvchlE97BY0KExevhdgk,374
+ losses/correspondence_loss.py,sha256=Tcq2o5eLY7j50pibAuH0vBcUTjwZ-wHNzGZD4H6mAe0,583
+ losses/emd.py,sha256=DqP77dN6lPkpGGgBz10oO6YNYxt889weYbVYj6bZFUM,389
+ losses/frobenius_norm.py,sha256=IuKr0DT9aPBlc5fjIy6lJ082yOh9F8xiNoXF6FvWZtY,682
+ losses/rmse_features.py,sha256=_KMqIWqH9-lH2P6YSeGfSOIbP7plUAwWWBh2Cu7cpXA,453
+ models/__init__.py,sha256=1MosMXTuwQDQ6El5o1vIrnfnfRlDvBSLc0JeMoJMFbw,673
+ models/classifier.py,sha256=_LUNXbLrpKNXmCkO2R1mz64dbwfrze7f_4SYT1Z6SYo,1205
+ models/curvenet.py,sha256=yHGjTwPYkCGMCL1BPZ0t2NPQNh6kxairyJEBbjhjjEk,5461
+ models/dcp.py,sha256=LZFgtk9f9f9s3QvX65nFXGgC33yGIZuy4XjviwH8OGE,3377
+ models/deepgmr.py,sha256=vIxOQrZjvOCHLElJCjZ8EcZ-vm0-v71IKsPGuSF-elE,5298
+ models/dgcnn.py,sha256=Z_5My91H8pcG0HGF75DSI3svbsZ6-ASV-0xx8UrdEt8,1989
+ models/masknet.py,sha256=ElMF3b-JgYmgwSEf1taGQvhA7Xy7_MiHEofzc03VCd8,2705
+ models/masknet2.py,sha256=6lgukurfzUOY-6xdCpMljOYFtvADLSczAXJzRC3Jkh4,9063
+ models/pcn.py,sha256=FvpjLR6t3kFQ1I4Fhpbsaj_P8Ml6S912x36JAZ1dUKs,5346
+ models/pcrnet.py,sha256=6C6iM3XkDNdgihtPIdy09RgFD2KKDCnDzLvFfp6X-Tg,2755
+ models/pointconv.py,sha256=lJ3_3uslE29lO3roZiE5vxr5971AWV5ExeVTzbEl858,5151
+ models/pointnet.py,sha256=qgSWLJ4N5Y7ObAwKiJH29Pcl67jm3sfqbXqi3tQbUQg,3238
+ models/pointnetlk.py,sha256=Zl66LjDX1vLdZRgCdY2oQJnpWpqPEx6BH8GbcVCsw68,5805
+ models/pooling.py,sha256=vOzJMambeG7kf1pnox5p5FE2CVH2iMDGU_DgWRw15WQ,412
+ models/ppfnet.py,sha256=aBzWvtNHFo-eu1kWoZmPI9xJOFHyxYHjdapb6cN2Aso,2894
+ models/prnet.py,sha256=MxhoSM8xPlwL4CSFKhDDJAx08-U1Dfzkoh1T0wHygHg,17493
+ models/rpmnet.py,sha256=eMVqJ6BalY96TSB8VFXjCJIA15J0XAB3BEpMB-6CMdM,11517
+ models/segmentation.py,sha256=CjlINj5M0Y6C-CejrieIu9ZkuwEoCFNjq_hr5SX9umU,1166
+ ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ops/data_utils.py,sha256=qW8FOQWgcHjuuiSaAayJ3nKJnDW_GDv3ujE-uFvWsPE,1764
+ ops/invmat.py,sha256=m1Mm2mQNn4KgQv54w-ek0xLkZuKnfDlF2ooylXeXvAw,4154
+ ops/quaternion.py,sha256=D00IL6VHitYy4CINFAilD0t0kyBjc_JHfKuMrJGJ1Cw,6793
+ ops/se3.py,sha256=x6oLbQzLOXiM0xDJsVUCUE1liZ_TaJzkkHQvIyjqCqI,3957
+ ops/sinc.py,sha256=A_Ffu07RXGx3daZn4zOGdnW10_l06cmhFdAiU4NKhcw,5228
+ ops/so3.py,sha256=b0tX5nHyF2Qtp8V0ejGKaPaHJQ_G38ifQ7gSJzRU1ts,5166
+ ops/transform_functions.py,sha256=hvNjZO-uJodsGYtQwtAtDxtQ6uBpA7Lv9t-_yAg6wxo,12806
+ utils/__init__.py,sha256=QCalqFqrdSWsu2_fZXJoIARv1uI2GiUx8MVtZ8PoiRw,650
+ utils/curvenet_util.py,sha256=dcOreJBJddptH_COJkKGlfQnnoHPAnKlnUdA8sQg3GI,19527
+ utils/model_common_utils.py,sha256=05cWF97LJBat8rKKVbihb5DgaC_mMv5uY5yOVzH4Dx0,5320
+ utils/pointconv_util.py,sha256=kJxGztai7X15YsGuorMOc50SPtj_k1yfkP4XCTzIWdM,14331
+ utils/ppfnet_util.py,sha256=HEoxkgUBlawKZLWspfQm3caWUyAMIrW-ECtStNYbe2Y,7989
+ utils/svd.py,sha256=yCYQt2SKqeIzCBnBEr_8xFR79m4fIoNVFnp77epn1dM,1936
+ utils/transformer.py,sha256=UDgJvnh7ekWyijaAn-a3ckeFeMxlK_chXzWlhAGDiPM,8974
+ learning3d-0.2.0.dist-info/LICENSE,sha256=3qY3_NeQIvalbLlsHFtOfuUKjs_U2k6u7rf6YVx6ac0,1098
+ learning3d-0.2.0.dist-info/METADATA,sha256=QFrlYdTQ9UEEdKwDnAf8mk8kc80qegmwWYbpYaXJXRQ,18079
+ learning3d-0.2.0.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+ learning3d-0.2.0.dist-info/top_level.txt,sha256=80jcwB70RZ4Xr5DiY8ngxdX9YTRT7b7YvApYsqoGHak,44
+ learning3d-0.2.0.dist-info/RECORD,,
learning3d-0.2.0.dist-info/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.43.0)
+ Generator: setuptools (75.3.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

learning3d-0.2.0.dist-info/top_level.txt ADDED
@@ -0,0 +1,6 @@
+ data_utils
+ examples
+ losses
+ models
+ ops
+ utils
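A practical note inferred from this top_level.txt and the RECORD above: the 0.1.0 wheel installed a single `learning3d` package, while the 0.2.0 wheel installs six top-level packages, so imports from the installed wheel move accordingly. A hedged illustration (verify against your own site-packages before relying on it):

```python
# Illustrative only; layout inferred from top_level.txt / RECORD in this diff.
try:
    from learning3d.models import PointNet   # 0.1.0 wheel: one top-level 'learning3d' package
except ImportError:
    from models import PointNet              # 0.2.0 wheel: top-level 'models' package
```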
models/__init__.py
@@ -15,4 +15,10 @@ from .rpmnet import RPMNet
  from .pcn import PCN
  from .deepgmr import DeepGMR
  from .masknet import MaskNet
- from .masknet2 import MaskNet2
+ from .masknet2 import MaskNet2
+ from .curvenet import CurveNet
+
+ try:
+ from .flownet3d import FlowNet3D
+ except:
+ print("Error raised in pointnet2 module for FlowNet3D Network!\nEither don't use pointnet2_utils or retry it's setup.")
models/curvenet.py ADDED
@@ -0,0 +1,130 @@
+ """
+ @Author: Tiange Xiang
+ @Contact: txia7609@uni.sydney.edu.au
+ @File: curvenet_cls.py
+ @Time: 2021/01/21 3:10 PM
+ """
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from .. utils import (
+ index_points,
+ farthest_point_sample,
+ query_ball_point,
+ LPFA,
+ CIC
+ )
+
+ def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):
+ """
+ Input:
+ npoint:
+ radius:
+ nsample:
+ xyz: input points position data, [B, N, 3]
+ points: input points data, [B, N, D]
+ Return:
+ new_xyz: sampled points position data, [B, npoint, nsample, 3]
+ new_points: sampled points data, [B, npoint, nsample, 3+D]
+ """
+ new_xyz = index_points(xyz, farthest_point_sample(xyz, npoint))
+ torch.cuda.empty_cache()
+
+ idx = query_ball_point(radius, nsample, xyz, new_xyz)
+ torch.cuda.empty_cache()
+
+ new_points = index_points(points, idx)
+ torch.cuda.empty_cache()
+
+ if returnfps:
+ return new_xyz, new_points, idx
+ else:
+ return new_xyz, new_points
+
+ curve_config = {
+ 'default': [[100, 5], [100, 5], None, None],
+ 'long': [[10, 30], None, None, None]
+ }
+
+ class CurveNet(nn.Module):
+ def __init__(self, num_classes=40, k=20, setting='default', input_shape="bnc", emb_dims=2048, classifier=True):
+ super(CurveNet, self).__init__()
+
+ if input_shape not in ["bcn", "bnc"]:
+ raise ValueError("Allowed shapes are 'bcn' (batch * channels * num_in_points), 'bnc' ")
+
+ self.input_shape = input_shape
+
+ assert setting in curve_config
+
+ additional_channel = 32
+ self.classifier = classifier
+ self.lpfa = LPFA(9, additional_channel, k=k, mlp_num=1, initial=True)
+
+ # encoder
+ self.cic11 = CIC(npoint=1024, radius=0.05, k=k, in_channels=additional_channel, output_channels=64, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][0])
+ self.cic12 = CIC(npoint=1024, radius=0.05, k=k, in_channels=64, output_channels=64, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][0])
+
+ self.cic21 = CIC(npoint=1024, radius=0.05, k=k, in_channels=64, output_channels=128, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][1])
+ self.cic22 = CIC(npoint=1024, radius=0.1, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][1])
+
+ self.cic31 = CIC(npoint=256, radius=0.1, k=k, in_channels=128, output_channels=256, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][2])
+ self.cic32 = CIC(npoint=256, radius=0.2, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][2])
+
+ self.cic41 = CIC(npoint=64, radius=0.2, k=k, in_channels=256, output_channels=512, bottleneck_ratio=2, mlp_num=1, curve_config=curve_config[setting][3])
+ self.cic42 = CIC(npoint=64, radius=0.4, k=k, in_channels=512, output_channels=512, bottleneck_ratio=4, mlp_num=1, curve_config=curve_config[setting][3])
+
+ self.conv0 = nn.Sequential(
+ nn.Conv1d(512, emb_dims//2, kernel_size=1, bias=False),
+ nn.BatchNorm1d(emb_dims//2),
+ nn.ReLU(inplace=True))
+
+ if self.classifier:
+ self.conv1 = nn.Linear(emb_dims, 512, bias=False)
+ self.conv2 = nn.Linear(512, num_classes)
+ self.bn1 = nn.BatchNorm1d(512)
+ self.dp1 = nn.Dropout(p=0.5)
+
+ def forward(self, xyz, get_flatten_curve_idxs=False):
+ flatten_curve_idxs = {}
+ if self.input_shape == 'bnc':
+ xyz = xyz.permute(0, 2, 1)
+
+ l0_points = self.lpfa(xyz, xyz)
+
+ l1_xyz, l1_points, flatten_curve_idxs_11 = self.cic11(xyz, l0_points)
+ flatten_curve_idxs['flatten_curve_idxs_11'] = flatten_curve_idxs_11
+ l1_xyz, l1_points, flatten_curve_idxs_12 = self.cic12(l1_xyz, l1_points)
+ flatten_curve_idxs['flatten_curve_idxs_12'] = flatten_curve_idxs_12
+
+ l2_xyz, l2_points, flatten_curve_idxs_21 = self.cic21(l1_xyz, l1_points)
+ flatten_curve_idxs['flatten_curve_idxs_21'] = flatten_curve_idxs_21
+ l2_xyz, l2_points, flatten_curve_idxs_22 = self.cic22(l2_xyz, l2_points)
+ flatten_curve_idxs['flatten_curve_idxs_22'] = flatten_curve_idxs_22
+
+ l3_xyz, l3_points, flatten_curve_idxs_31 = self.cic31(l2_xyz, l2_points)
+ flatten_curve_idxs['flatten_curve_idxs_31'] = flatten_curve_idxs_31
+ l3_xyz, l3_points, flatten_curve_idxs_32 = self.cic32(l3_xyz, l3_points)
+ flatten_curve_idxs['flatten_curve_idxs_32'] = flatten_curve_idxs_32
+
+ l4_xyz, l4_points, flatten_curve_idxs_41 = self.cic41(l3_xyz, l3_points)
+ flatten_curve_idxs['flatten_curve_idxs_41'] = flatten_curve_idxs_41
+ l4_xyz, l4_points, flatten_curve_idxs_42 = self.cic42(l4_xyz, l4_points)
+ flatten_curve_idxs['flatten_curve_idxs_42'] = flatten_curve_idxs_42
+
+ x = self.conv0(l4_points)
+ x_max = F.adaptive_max_pool1d(x, 1)
+ x_avg = F.adaptive_avg_pool1d(x, 1)
+
+ x = torch.cat((x_max, x_avg), dim=1).squeeze(-1)
+
+ if self.classifier:
+ x = F.relu(self.bn1(self.conv1(x).unsqueeze(-1)), inplace=True).squeeze(-1)
+ x = self.dp1(x)
+ x = self.conv2(x)
+
+ if get_flatten_curve_idxs:
+ return x, flatten_curve_idxs
+ else:
+ return x
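Given the constructor and forward signature above, a minimal smoke test of the new model could look like the sketch below. It is not taken from the package (examples/test_curvenet.py in this release is the canonical entry point); the import path assumes the 0.2.0 wheel's top-level `models` package, and depending on how curvenet_util is implemented a CUDA device may be preferable.

```python
import torch

from models import CurveNet  # assumed 0.2.0 wheel layout; learning3d.models in the source repo

# 'bnc' input shape means (batch, num_points, 3); the first CIC blocks use npoint=1024,
# so 1024 points per cloud is the natural size to feed in.
model = CurveNet(num_classes=40, k=20, setting='default', input_shape='bnc')
model.eval()

points = torch.rand(2, 1024, 3)
with torch.no_grad():
    logits = model(points)                                        # class logits, one row per cloud
    logits, curve_idxs = model(points, get_flatten_curve_idxs=True)
print(logits.shape, sorted(curve_idxs.keys()))
```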
models/dgcnn.py
@@ -1,40 +1,6 @@
  import torch
  import torch.nn.functional as F
-
- def knn(x, k):
- inner = -2 * torch.matmul(x.transpose(2, 1).contiguous(), x)
- xx = torch.sum(x ** 2, dim=1, keepdim=True)
- pairwise_distance = -xx - inner - xx.transpose(2, 1).contiguous()
-
- idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
- return idx
-
-
- def get_graph_feature(x, k=20):
- # x = x.squeeze()
- idx = knn(x, k=k) # (batch_size, num_points, k)
- batch_size, num_points, _ = idx.size()
-
- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-
- idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
-
- idx = idx + idx_base
-
- idx = idx.view(-1)
-
- _, num_dims, _ = x.size()
-
- # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) # batch_size * num_points * k + range(0, batch_size*num_points)
- x = x.transpose(2, 1).contiguous()
-
- feature = x.view(batch_size * num_points, -1)[idx, :]
- feature = feature.view(batch_size, num_points, k, num_dims)
- x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
-
- feature = torch.cat((feature, x), dim=3).permute(0, 3, 1, 2)
-
- return feature
+ from .. utils import knn, get_graph_feature


  class DGCNN(torch.nn.Module):
models/prnet.py
@@ -16,7 +16,7 @@ import torch.nn as nn
  import torch.nn.functional as F

  from .. ops import transform_functions as transform
- from .. utils import Transformer, Identity
+ from .. utils import Transformer, Identity, knn, get_graph_feature

  from sklearn.metrics import r2_score

@@ -30,40 +30,6 @@ def pairwise_distance(src, tgt):
  distances = xx.transpose(2, 1).contiguous() + inner + yy
  return torch.sqrt(distances)

-
- def knn(x, k):
- inner = -2 * torch.matmul(x.transpose(2, 1).contiguous(), x)
- xx = torch.sum(x ** 2, dim=1, keepdim=True)
- distance = -xx - inner - xx.transpose(2, 1).contiguous()
-
- idx = distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
- return idx
-
-
- def get_graph_feature(x, k=20):
- # x = x.squeeze()
- x = x.view(*x.size()[:3])
- idx = knn(x, k=k) # (batch_size, num_points, k)
- batch_size, num_points, _ = idx.size()
-
- idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
-
- idx = idx + idx_base
-
- idx = idx.view(-1)
-
- _, num_dims, _ = x.size()
-
- x = x.transpose(2, 1).contiguous() # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) # batch_size * num_points * k + range(0, batch_size*num_points)
- feature = x.view(batch_size * num_points, -1)[idx, :]
- feature = feature.view(batch_size, num_points, k, num_dims)
- x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
-
- feature = torch.cat((feature, x), dim=3).permute(0, 3, 1, 2)
-
- return feature
-
-
  def cycle_consistency(rotation_ab, translation_ab, rotation_ba, translation_ba):
  batch_size = rotation_ab.size(0)
  identity = torch.eye(3, device=rotation_ab.device).unsqueeze(0).repeat(batch_size, 1, 1)
@@ -109,19 +75,19 @@ class DGCNN(nn.Module):

  def forward(self, x):
  batch_size, num_dims, num_points = x.size()
- x = get_graph_feature(x)
+ x = get_graph_feature(x, device=device)
  x = F.leaky_relu(self.bn1(self.conv1(x)), negative_slope=0.2)
  x1 = x.max(dim=-1, keepdim=True)[0]

- x = get_graph_feature(x1)
+ x = get_graph_feature(x1, device=device)
  x = F.leaky_relu(self.bn2(self.conv2(x)), negative_slope=0.2)
  x2 = x.max(dim=-1, keepdim=True)[0]

- x = get_graph_feature(x2)
+ x = get_graph_feature(x2, device=device)
  x = F.leaky_relu(self.bn3(self.conv3(x)), negative_slope=0.2)
  x3 = x.max(dim=-1, keepdim=True)[0]

- x = get_graph_feature(x3)
+ x = get_graph_feature(x3, device=device)
  x = F.leaky_relu(self.bn4(self.conv4(x)), negative_slope=0.2)
  x4 = x.max(dim=-1, keepdim=True)[0]

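Both dgcnn.py and prnet.py now import knn and get_graph_feature from the shared utils package (backed by utils/model_common_utils.py in the RECORD above) instead of carrying private copies. For readers unfamiliar with DGCNN-style edge features, here is a self-contained sketch that mirrors the removed local copies; it only approximates the shared helpers (the prnet hunk suggests get_graph_feature now takes an explicit device argument), so treat the signatures as assumptions.

```python
import torch

def knn(x, k):
    # x: (batch, dims, num_points). Returns indices of the k nearest neighbours
    # of every point, shape (batch, num_points, k), mirroring the removed copies.
    inner = -2 * torch.matmul(x.transpose(2, 1).contiguous(), x)
    xx = torch.sum(x ** 2, dim=1, keepdim=True)
    pairwise_distance = -xx - inner - xx.transpose(2, 1).contiguous()
    return pairwise_distance.topk(k=k, dim=-1)[1]

def get_graph_feature(x, k=20, device=torch.device('cpu')):
    # DGCNN edge features: concatenate the features of each point's k nearest
    # neighbours with its own feature, giving (batch, 2*dims, num_points, k).
    batch_size, num_dims, num_points = x.size()
    idx = knn(x, k=k)
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
    idx = (idx + idx_base).view(-1)
    x = x.transpose(2, 1).contiguous()
    feature = x.view(batch_size * num_points, -1)[idx, :]
    feature = feature.view(batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    return torch.cat((feature, x), dim=3).permute(0, 3, 1, 2)

pts = torch.rand(2, 3, 128)                 # (batch, dims, num_points)
print(get_graph_feature(pts, k=20).shape)   # torch.Size([2, 6, 128, 20])
```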
utils/__init__.py ADDED
@@ -0,0 +1,23 @@
+ from .svd import SVDHead
+ from .transformer import Transformer, Identity
+ from .ppfnet_util import angle_difference, square_distance, index_points, farthest_point_sample, query_ball_point, sample_and_group, sample_and_group_multi
+ from .pointconv_util import PointConvDensitySetAbstraction
+ from .model_common_utils import (
+ knn,
+ pc_normalize,
+ square_distance,
+ index_points,
+ farthest_point_sample,
+ knn_point,
+ query_ball_point,
+ get_graph_feature
+ )
+ from .curvenet_util import (
+ LPFA,
+ CIC,
+ )
+
+ try:
+ from .lib import pointnet2_utils
+ except:
+ print("Error raised in pointnet2 module in utils!\nEither don't use pointnet2_utils or retry it's setup.")