ocnn 2.2.5.tar.gz → 2.2.7.tar.gz

This diff compares the contents of package versions that were publicly released to one of the supported registries. It is provided for informational purposes only and reflects the two versions exactly as they appear in their public registry.
Files changed (46)
  1. {ocnn-2.2.5 → ocnn-2.2.7}/LICENSE +21 -21
  2. {ocnn-2.2.5 → ocnn-2.2.7}/MANIFEST.in +1 -1
  3. ocnn-2.2.7/PKG-INFO +112 -0
  4. ocnn-2.2.7/README.md +82 -0
  5. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/__init__.py +24 -24
  6. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/dataset.py +160 -160
  7. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/__init__.py +29 -29
  8. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/autoencoder.py +155 -155
  9. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/hrnet.py +192 -192
  10. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/image2shape.py +128 -128
  11. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/lenet.py +46 -46
  12. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/ounet.py +94 -94
  13. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/resnet.py +53 -53
  14. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/segnet.py +72 -72
  15. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/unet.py +105 -105
  16. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/modules/__init__.py +26 -26
  17. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/modules/modules.py +303 -303
  18. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/modules/resblocks.py +158 -158
  19. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/__init__.py +44 -44
  20. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree2col.py +53 -53
  21. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree2vox.py +50 -50
  22. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_align.py +46 -46
  23. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_conv.py +429 -429
  24. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_drop.py +55 -55
  25. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_dwconv.py +222 -222
  26. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_gconv.py +79 -79
  27. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_interp.py +196 -196
  28. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_norm.py +126 -126
  29. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_pad.py +39 -39
  30. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/nn/octree_pool.py +200 -200
  31. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/octree/__init__.py +22 -22
  32. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/octree/octree.py +661 -659
  33. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/octree/points.py +323 -322
  34. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/octree/shuffled_key.py +115 -115
  35. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn/utils.py +205 -205
  36. ocnn-2.2.7/ocnn.egg-info/PKG-INFO +112 -0
  37. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn.egg-info/not-zip-safe +1 -1
  38. {ocnn-2.2.5 → ocnn-2.2.7}/setup.cfg +9 -9
  39. {ocnn-2.2.5 → ocnn-2.2.7}/setup.py +35 -35
  40. ocnn-2.2.5/PKG-INFO +0 -80
  41. ocnn-2.2.5/README.md +0 -61
  42. ocnn-2.2.5/ocnn.egg-info/PKG-INFO +0 -80
  43. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn.egg-info/SOURCES.txt +0 -0
  44. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn.egg-info/dependency_links.txt +0 -0
  45. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn.egg-info/requires.txt +0 -0
  46. {ocnn-2.2.5 → ocnn-2.2.7}/ocnn.egg-info/top_level.txt +0 -0
{ocnn-2.2.5 → ocnn-2.2.7}/LICENSE RENAMED
@@ -1,21 +1,21 @@
- MIT License
-
- Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
+ MIT License
+
+ Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
{ocnn-2.2.5 → ocnn-2.2.7}/MANIFEST.in RENAMED
@@ -1 +1 @@
- recursive-exclude test *
+ recursive-exclude test *
ocnn-2.2.7/PKG-INFO ADDED
@@ -0,0 +1,112 @@
+ Metadata-Version: 2.4
+ Name: ocnn
+ Version: 2.2.7
+ Summary: Octree-based Sparse Convolutional Neural Networks
+ Home-page: https://github.com/octree-nn/ocnn-pytorch
+ Author: Peng-Shuai Wang
+ Author-email: wangps@hotmail.com
+ License: MIT
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.6
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: torch
+ Requires-Dist: torchvision
+ Requires-Dist: numpy
+ Requires-Dist: packaging
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license
+ Dynamic: license-file
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # O-CNN
+
+ **[Documentation](https://ocnn-pytorch.readthedocs.io)**
+
+ [![Documentation Status](https://readthedocs.org/projects/ocnn-pytorch/badge/?version=latest)](https://ocnn-pytorch.readthedocs.io/en/latest/?badge=latest)
+ [![Downloads](https://static.pepy.tech/badge/ocnn)](https://pepy.tech/project/ocnn)
+ [![Downloads](https://static.pepy.tech/badge/ocnn/month)](https://pepy.tech/project/ocnn)
+ [![PyPI](https://img.shields.io/pypi/v/ocnn)](https://pypi.org/project/ocnn/)
+
+ This repository contains the **pure PyTorch**-based implementation of
+ [O-CNN](https://wang-ps.github.io/O-CNN.html). The code has been tested with
+ `PyTorch>=1.6.0`, and `PyTorch>=1.9.0` is preferred. The *original*
+ implementation of O-CNN is based on C++ and CUDA and can be found
+ [here](https://github.com/Microsoft/O-CNN), which has received
+ [![stars - O-CNN](https://img.shields.io/github/stars/microsoft/O-CNN?style=social)](https://github.com/microsoft/O-CNN) and
+ [![forks - O-CNN](https://img.shields.io/github/forks/microsoft/O-CNN?style=social)](https://github.com/microsoft/O-CNN).
+
+
+ O-CNN is an octree-based 3D convolutional neural network framework for 3D data.
+ O-CNN constrains CNN storage and computation to non-empty sparse voxels
+ for efficiency and uses the `octree` data structure to organize and index these
+ sparse voxels. Currently, this type of 3D convolution is known as Sparse
+ Convolution in the research community.
+
+
+ The concept of Sparse Convolution in O-CNN is the same as in
+ [SparseConvNet](https://openaccess.thecvf.com/content_cvpr_2018/papers/Graham_3D_Semantic_Segmentation_CVPR_2018_paper.pdf),
+ [MinkowskiNet](https://github.com/NVIDIA/MinkowskiEngine), and
+ [SpConv](https://github.com/traveller59/spconv).
+ The key difference is that our O-CNN uses `octrees` to index the sparse voxels,
+ while these works use `hash tables`. However, I believe that `octrees` may be
+ the right choice for Sparse Convolution. With `octrees`, I can implement
+ Sparse Convolution in pure PyTorch. More importantly, with `octrees`, I can
+ also build efficient transformers for 3D data --
+ [OctFormer](https://github.com/octree-nn/octformer), which is extremely hard
+ with `hash tables`.
+
+
+ Our O-CNN was published at SIGGRAPH 2017, SparseConvNet at CVPR 2018, and
+ MinkowskiNet at CVPR 2019. Actually, our O-CNN was submitted to SIGGRAPH at
+ the end of 2016 and was officially accepted in March 2017.
+ <!-- The camera-ready version of our O-CNN was submitted to SIGGRAPH in April, 2018. -->
+ We just did not post our paper on arXiv during the review process of SIGGRAPH.
+ Therefore, **the idea of constraining CNN computation to sparse non-empty
+ voxels, i.e. Sparse Convolution, was first proposed by our O-CNN**.
+
+ <!--
+ Developed in collaboration with authors from [PointCNN](https://arxiv.org/abs/1801.07791),
+ [Dr. Yangyan Li](https://yangyan.li/) and [Prof. Baoquan Chen](https://baoquanchen.info/),
+ -->
+ This library supports point cloud processing from the ground up.
+ The library provides essential components for converting raw point clouds into
+ octrees to perform convolution operations. Of course, it also supports other 3D
+ data formats, such as meshes and volumetric grids, which can be converted into
+ octrees to leverage the library's capabilities.
+
+
+ ## Key benefits of ocnn-pytorch
+
+ - **Simplicity**. ocnn-pytorch is based on pure PyTorch; it is portable and
+   can be installed with a simple command: `pip install ocnn`. Other sparse
+   convolution frameworks rely heavily on C++ and CUDA, and configuring their
+   compilation environments is complicated.
+
+ - **Efficiency**. ocnn-pytorch is very efficient compared with other sparse
+   convolution frameworks. It takes only 18 hours to train the network on
+   ScanNet for 600 epochs with 4 V100 GPUs. For reference, under the same
+   training settings, MinkowskiNet 0.4.3 takes 60 hours and MinkowskiNet 0.5.4
+   takes 30 hours.
+
+ ## Citation
+
+ ```bibtex
+ @article{Wang-2017-ocnn,
+   title   = {{O-CNN}: Octree-based Convolutional Neural Networks for {3D} Shape Analysis},
+   author  = {Wang, Peng-Shuai and Liu, Yang and Guo, Yu-Xiao and Sun, Chun-Yu and Tong, Xin},
+   journal = {ACM Transactions on Graphics (SIGGRAPH)},
+   volume  = {36},
+   number  = {4},
+   year    = {2017},
+ }
+ ```
ocnn-2.2.7/README.md ADDED
@@ -0,0 +1,82 @@
+ # O-CNN
+
+ **[Documentation](https://ocnn-pytorch.readthedocs.io)**
+
+ [![Documentation Status](https://readthedocs.org/projects/ocnn-pytorch/badge/?version=latest)](https://ocnn-pytorch.readthedocs.io/en/latest/?badge=latest)
+ [![Downloads](https://static.pepy.tech/badge/ocnn)](https://pepy.tech/project/ocnn)
+ [![Downloads](https://static.pepy.tech/badge/ocnn/month)](https://pepy.tech/project/ocnn)
+ [![PyPI](https://img.shields.io/pypi/v/ocnn)](https://pypi.org/project/ocnn/)
+
+ This repository contains the **pure PyTorch**-based implementation of
+ [O-CNN](https://wang-ps.github.io/O-CNN.html). The code has been tested with
+ `PyTorch>=1.6.0`, and `PyTorch>=1.9.0` is preferred. The *original*
+ implementation of O-CNN is based on C++ and CUDA and can be found
+ [here](https://github.com/Microsoft/O-CNN), which has received
+ [![stars - O-CNN](https://img.shields.io/github/stars/microsoft/O-CNN?style=social)](https://github.com/microsoft/O-CNN) and
+ [![forks - O-CNN](https://img.shields.io/github/forks/microsoft/O-CNN?style=social)](https://github.com/microsoft/O-CNN).
+
+
+ O-CNN is an octree-based 3D convolutional neural network framework for 3D data.
+ O-CNN constrains CNN storage and computation to non-empty sparse voxels
+ for efficiency and uses the `octree` data structure to organize and index these
+ sparse voxels. Currently, this type of 3D convolution is known as Sparse
+ Convolution in the research community.
+
+
+ The concept of Sparse Convolution in O-CNN is the same as in
+ [SparseConvNet](https://openaccess.thecvf.com/content_cvpr_2018/papers/Graham_3D_Semantic_Segmentation_CVPR_2018_paper.pdf),
+ [MinkowskiNet](https://github.com/NVIDIA/MinkowskiEngine), and
+ [SpConv](https://github.com/traveller59/spconv).
+ The key difference is that our O-CNN uses `octrees` to index the sparse voxels,
+ while these works use `hash tables`. However, I believe that `octrees` may be
+ the right choice for Sparse Convolution. With `octrees`, I can implement
+ Sparse Convolution in pure PyTorch. More importantly, with `octrees`, I can
+ also build efficient transformers for 3D data --
+ [OctFormer](https://github.com/octree-nn/octformer), which is extremely hard
+ with `hash tables`.
+
+
+ Our O-CNN was published at SIGGRAPH 2017, SparseConvNet at CVPR 2018, and
+ MinkowskiNet at CVPR 2019. Actually, our O-CNN was submitted to SIGGRAPH at
+ the end of 2016 and was officially accepted in March 2017.
+ <!-- The camera-ready version of our O-CNN was submitted to SIGGRAPH in April, 2018. -->
+ We just did not post our paper on arXiv during the review process of SIGGRAPH.
+ Therefore, **the idea of constraining CNN computation to sparse non-empty
+ voxels, i.e. Sparse Convolution, was first proposed by our O-CNN**.
+
+ <!--
+ Developed in collaboration with authors from [PointCNN](https://arxiv.org/abs/1801.07791),
+ [Dr. Yangyan Li](https://yangyan.li/) and [Prof. Baoquan Chen](https://baoquanchen.info/),
+ -->
+ This library supports point cloud processing from the ground up.
+ The library provides essential components for converting raw point clouds into
+ octrees to perform convolution operations. Of course, it also supports other 3D
+ data formats, such as meshes and volumetric grids, which can be converted into
+ octrees to leverage the library's capabilities.
+
+
+ ## Key benefits of ocnn-pytorch
+
+ - **Simplicity**. ocnn-pytorch is based on pure PyTorch; it is portable and
+   can be installed with a simple command: `pip install ocnn`. Other sparse
+   convolution frameworks rely heavily on C++ and CUDA, and configuring their
+   compilation environments is complicated.
+
+ - **Efficiency**. ocnn-pytorch is very efficient compared with other sparse
+   convolution frameworks. It takes only 18 hours to train the network on
+   ScanNet for 600 epochs with 4 V100 GPUs. For reference, under the same
+   training settings, MinkowskiNet 0.4.3 takes 60 hours and MinkowskiNet 0.5.4
+   takes 30 hours.
+
+ ## Citation
+
+ ```bibtex
+ @article{Wang-2017-ocnn,
+   title   = {{O-CNN}: Octree-based Convolutional Neural Networks for {3D} Shape Analysis},
+   author  = {Wang, Peng-Shuai and Liu, Yang and Guo, Yu-Xiao and Sun, Chun-Yu and Tong, Xin},
+   journal = {ACM Transactions on Graphics (SIGGRAPH)},
+   volume  = {36},
+   number  = {4},
+   year    = {2017},
+ }
+ ```
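The core workflow behind `pip install ocnn` is exercised by the `ocnn/dataset.py` diff further down. A minimal sketch, assuming only the calls visible in that file (`Points(xyz, normals)`, `Octree(depth, full_depth)`, `build_octree`, `construct_all_neigh`); the random input tensors are purely illustrative:

```python
# Build an octree from a synthetic point cloud (sketch; inputs are random).
import torch
from ocnn.octree import Octree, Points

xyz = torch.rand(1000, 3) * 2 - 1                      # coordinates in [-1, 1]
normals = torch.nn.functional.normalize(torch.randn(1000, 3), dim=1)

points = Points(xyz, normals)           # wrap raw tensors, as in Transform.preprocess
octree = Octree(depth=6, full_depth=2)  # layers below full_depth are kept full
octree.build_octree(points)             # insert the points into the octree
octree.construct_all_neigh()            # build neighbor indices before any convolution
```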
{ocnn-2.2.5 → ocnn-2.2.7}/ocnn/__init__.py RENAMED
@@ -1,24 +1,24 @@
- # --------------------------------------------------------
- # Octree-based Sparse Convolutional Neural Networks
- # Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
- # Licensed under The MIT License [see LICENSE for details]
- # Written by Peng-Shuai Wang
- # --------------------------------------------------------
-
- from . import octree
- from . import nn
- from . import modules
- from . import models
- from . import dataset
- from . import utils
-
- __version__ = '2.2.5'
-
- __all__ = [
-     'octree',
-     'nn',
-     'modules',
-     'models',
-     'dataset',
-     'utils'
- ]
+ # --------------------------------------------------------
+ # Octree-based Sparse Convolutional Neural Networks
+ # Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Peng-Shuai Wang
+ # --------------------------------------------------------
+
+ from . import octree
+ from . import nn
+ from . import modules
+ from . import models
+ from . import dataset
+ from . import utils
+
+ __version__ = '2.2.7'
+
+ __all__ = [
+     'octree',
+     'nn',
+     'modules',
+     'models',
+     'dataset',
+     'utils'
+ ]
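The only functional change in this file is the version bump from `2.2.5` to `2.2.7`; a quick sanity check after upgrading:

```python
# Verify the installed ocnn release and its public submodules.
import ocnn

print(ocnn.__version__)  # expected: '2.2.7'
print(ocnn.__all__)      # ['octree', 'nn', 'modules', 'models', 'dataset', 'utils']
```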
{ocnn-2.2.5 → ocnn-2.2.7}/ocnn/dataset.py RENAMED
@@ -1,160 +1,160 @@
- # --------------------------------------------------------
- # Octree-based Sparse Convolutional Neural Networks
- # Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
- # Licensed under The MIT License [see LICENSE for details]
- # Written by Peng-Shuai Wang
- # --------------------------------------------------------
-
- import torch
-
- import ocnn
- from ocnn.octree import Octree, Points
-
-
- __all__ = ['Transform', 'CollateBatch']
- classes = __all__
-
-
- class Transform:
-   r''' A boilerplate class which transforms input data for :obj:`ocnn`.
-   The input data is first converted to :class:`Points`, then randomly
-   transformed (if enabled), and converted to an :class:`Octree`.
-
-   Args:
-     depth (int): The octree depth.
-     full_depth (int): The octree layers with a depth smaller than
-         :attr:`full_depth` are forced to be full.
-     distort (bool): If true, performs the data augmentation.
-     angle (list): A list of 3 float values to generate random rotation angles.
-     interval (list): A list of 3 float values to represent the interval of
-         rotation angles.
-     scale (float): The maximum relative scale factor.
-     uniform (bool): If true, performs uniform scaling.
-     jitter (float): The maximum jitter value.
-     flip (list): A list of 3 float values as the probabilities of flipping
-         the points along the x, y, and z axes.
-     orient_normal (str): Orient point normals along the specified axis, which
-         is useful when normals are not oriented.
-   '''
-
-   def __init__(self, depth: int, full_depth: int, distort: bool, angle: list,
-                interval: list, scale: float, uniform: bool, jitter: float,
-                flip: list, orient_normal: str = '', **kwargs):
-     super().__init__()
-
-     # for octree building
-     self.depth = depth
-     self.full_depth = full_depth
-
-     # for data augmentation
-     self.distort = distort
-     self.angle = angle
-     self.interval = interval
-     self.scale = scale
-     self.uniform = uniform
-     self.jitter = jitter
-     self.flip = flip
-
-     # for other transformations
-     self.orient_normal = orient_normal
-
-   def __call__(self, sample: dict, idx: int):
-     r''''''
-
-     output = self.preprocess(sample, idx)
-     output = self.transform(output, idx)
-     output['octree'] = self.points2octree(output['points'])
-     return output
-
-   def preprocess(self, sample: dict, idx: int):
-     r''' Transforms :attr:`sample` to :class:`Points` and performs some
-     specific transformations, like normalization.
-     '''
-
-     xyz = torch.from_numpy(sample.pop('points'))
-     normals = torch.from_numpy(sample.pop('normals'))
-     sample['points'] = Points(xyz, normals)
-     return sample
-
-   def transform(self, sample: dict, idx: int):
-     r''' Applies the general transformations provided by :obj:`ocnn`.
-     '''
-
-     # The augmentations include rotation, scaling, and jittering.
-     points = sample['points']
-     if self.distort:
-       rnd_angle, rnd_scale, rnd_jitter, rnd_flip = self.rnd_parameters()
-       points.flip(rnd_flip)
-       points.rotate(rnd_angle)
-       points.translate(rnd_jitter)
-       points.scale(rnd_scale)
-
-     if self.orient_normal:
-       points.orient_normal(self.orient_normal)
-
-     # !!! NOTE: Clip the point cloud to [-1, 1] before building the octree
-     inbox_mask = points.clip(min=-1, max=1)
-     sample.update({'points': points, 'inbox_mask': inbox_mask})
-     return sample
-
-   def points2octree(self, points: Points):
-     r''' Converts the input :attr:`points` to an octree.
-     '''
-
-     octree = Octree(self.depth, self.full_depth)
-     octree.build_octree(points)
-     return octree
-
-   def rnd_parameters(self):
-     r''' Generates random parameters for data augmentation.
-     '''
-
-     rnd_angle = [None] * 3
-     for i in range(3):
-       rot_num = self.angle[i] // self.interval[i]
-       rnd = torch.randint(low=-rot_num, high=rot_num+1, size=(1,))
-       rnd_angle[i] = rnd * self.interval[i] * (3.14159265 / 180.0)
-     rnd_angle = torch.cat(rnd_angle)
-
-     rnd_scale = torch.rand(3) * (2 * self.scale) - self.scale + 1.0
-     if self.uniform:
-       rnd_scale[1] = rnd_scale[0]
-       rnd_scale[2] = rnd_scale[0]
-
-     rnd_flip = ''
-     for i, c in enumerate('xyz'):
-       if torch.rand([1]) < self.flip[i]:
-         rnd_flip = rnd_flip + c
-
-     rnd_jitter = torch.rand(3) * (2 * self.jitter) - self.jitter
-     return rnd_angle, rnd_scale, rnd_jitter, rnd_flip
-
-
- class CollateBatch:
-   r''' Merges a list of octrees and points into a batch.
-   '''
-
-   def __init__(self, merge_points: bool = False):
-     self.merge_points = merge_points
-
-   def __call__(self, batch: list):
-     assert type(batch) == list
-
-     outputs = {}
-     for key in batch[0].keys():
-       outputs[key] = [b[key] for b in batch]
-
-       # Merge a batch of octrees into one super octree
-       if 'octree' in key:
-         octree = ocnn.octree.merge_octrees(outputs[key])
-         # NOTE: remember to construct the neighbor indices
-         octree.construct_all_neigh()
-         outputs[key] = octree
-
-       # Merge a batch of points
-       if 'points' in key and self.merge_points:
-         outputs[key] = ocnn.octree.merge_points(outputs[key])
-
-       # Convert the labels to a Tensor
-       if 'label' in key:
-         outputs['label'] = torch.tensor(outputs[key])
-
-     return outputs
+ # --------------------------------------------------------
+ # Octree-based Sparse Convolutional Neural Networks
+ # Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Peng-Shuai Wang
+ # --------------------------------------------------------
+
+ import torch
+
+ import ocnn
+ from ocnn.octree import Octree, Points
+
+
+ __all__ = ['Transform', 'CollateBatch']
+ classes = __all__
+
+
+ class Transform:
+   r''' A boilerplate class which transforms input data for :obj:`ocnn`.
+   The input data is first converted to :class:`Points`, then randomly
+   transformed (if enabled), and converted to an :class:`Octree`.
+
+   Args:
+     depth (int): The octree depth.
+     full_depth (int): The octree layers with a depth smaller than
+         :attr:`full_depth` are forced to be full.
+     distort (bool): If true, performs the data augmentation.
+     angle (list): A list of 3 float values to generate random rotation angles.
+     interval (list): A list of 3 float values to represent the interval of
+         rotation angles.
+     scale (float): The maximum relative scale factor.
+     uniform (bool): If true, performs uniform scaling.
+     jitter (float): The maximum jitter value.
+     flip (list): A list of 3 float values as the probabilities of flipping
+         the points along the x, y, and z axes.
+     orient_normal (str): Orient point normals along the specified axis, which
+         is useful when normals are not oriented.
+   '''
+
+   def __init__(self, depth: int, full_depth: int, distort: bool, angle: list,
+                interval: list, scale: float, uniform: bool, jitter: float,
+                flip: list, orient_normal: str = '', **kwargs):
+     super().__init__()
+
+     # for octree building
+     self.depth = depth
+     self.full_depth = full_depth
+
+     # for data augmentation
+     self.distort = distort
+     self.angle = angle
+     self.interval = interval
+     self.scale = scale
+     self.uniform = uniform
+     self.jitter = jitter
+     self.flip = flip
+
+     # for other transformations
+     self.orient_normal = orient_normal
+
+   def __call__(self, sample: dict, idx: int):
+     r''''''
+
+     output = self.preprocess(sample, idx)
+     output = self.transform(output, idx)
+     output['octree'] = self.points2octree(output['points'])
+     return output
+
+   def preprocess(self, sample: dict, idx: int):
+     r''' Transforms :attr:`sample` to :class:`Points` and performs some
+     specific transformations, like normalization.
+     '''
+
+     xyz = torch.from_numpy(sample.pop('points'))
+     normals = torch.from_numpy(sample.pop('normals'))
+     sample['points'] = Points(xyz, normals)
+     return sample
+
+   def transform(self, sample: dict, idx: int):
+     r''' Applies the general transformations provided by :obj:`ocnn`.
+     '''
+
+     # The augmentations include rotation, scaling, and jittering.
+     points = sample['points']
+     if self.distort:
+       rnd_angle, rnd_scale, rnd_jitter, rnd_flip = self.rnd_parameters()
+       points.flip(rnd_flip)
+       points.rotate(rnd_angle)
+       points.translate(rnd_jitter)
+       points.scale(rnd_scale)
+
+     if self.orient_normal:
+       points.orient_normal(self.orient_normal)
+
+     # !!! NOTE: Clip the point cloud to [-1, 1] before building the octree
+     inbox_mask = points.clip(min=-1, max=1)
+     sample.update({'points': points, 'inbox_mask': inbox_mask})
+     return sample
+
+   def points2octree(self, points: Points):
+     r''' Converts the input :attr:`points` to an octree.
+     '''
+
+     octree = Octree(self.depth, self.full_depth)
+     octree.build_octree(points)
+     return octree
+
+   def rnd_parameters(self):
+     r''' Generates random parameters for data augmentation.
+     '''
+
+     rnd_angle = [None] * 3
+     for i in range(3):
+       rot_num = self.angle[i] // self.interval[i]
+       rnd = torch.randint(low=-rot_num, high=rot_num+1, size=(1,))
+       rnd_angle[i] = rnd * self.interval[i] * (3.14159265 / 180.0)
+     rnd_angle = torch.cat(rnd_angle)
+
+     rnd_scale = torch.rand(3) * (2 * self.scale) - self.scale + 1.0
+     if self.uniform:
+       rnd_scale[1] = rnd_scale[0]
+       rnd_scale[2] = rnd_scale[0]
+
+     rnd_flip = ''
+     for i, c in enumerate('xyz'):
+       if torch.rand([1]) < self.flip[i]:
+         rnd_flip = rnd_flip + c
+
+     rnd_jitter = torch.rand(3) * (2 * self.jitter) - self.jitter
+     return rnd_angle, rnd_scale, rnd_jitter, rnd_flip
+
+
+ class CollateBatch:
+   r''' Merges a list of octrees and points into a batch.
+   '''
+
+   def __init__(self, merge_points: bool = False):
+     self.merge_points = merge_points
+
+   def __call__(self, batch: list):
+     assert type(batch) == list
+
+     outputs = {}
+     for key in batch[0].keys():
+       outputs[key] = [b[key] for b in batch]
+
+       # Merge a batch of octrees into one super octree
+       if 'octree' in key:
+         octree = ocnn.octree.merge_octrees(outputs[key])
+         # NOTE: remember to construct the neighbor indices
+         octree.construct_all_neigh()
+         outputs[key] = octree
+
+       # Merge a batch of points
+       if 'points' in key and self.merge_points:
+         outputs[key] = ocnn.octree.merge_points(outputs[key])
+
+       # Convert the labels to a Tensor
+       if 'label' in key:
+         outputs['label'] = torch.tensor(outputs[key])
+
+     return outputs
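`Transform` and `CollateBatch` above are designed to plug into a standard PyTorch `DataLoader`. A minimal sketch under that assumption; the toy dataset and its random `points`/`normals` arrays are hypothetical stand-ins for real data:

```python
# Wire Transform and CollateBatch into a DataLoader (sketch with toy data).
import numpy as np
import torch.utils.data
from ocnn.dataset import Transform, CollateBatch

# Integer angle/interval values keep `angle[i] // interval[i]` an int,
# as torch.randint in rnd_parameters above expects.
transform = Transform(depth=6, full_depth=2, distort=True,
                      angle=[5, 5, 5], interval=[1, 1, 1],
                      scale=0.25, uniform=True, jitter=0.125,
                      flip=[0.0, 0.0, 0.0])

class ToyDataset(torch.utils.data.Dataset):
  def __len__(self):
    return 8

  def __getitem__(self, idx):
    sample = {'points': np.random.rand(1000, 3).astype(np.float32) * 2 - 1,
              'normals': np.random.rand(1000, 3).astype(np.float32),
              'label': 0}
    return transform(sample, idx)  # -> keys: points, inbox_mask, octree, label

loader = torch.utils.data.DataLoader(ToyDataset(), batch_size=4,
                                     collate_fn=CollateBatch(merge_points=True))
batch = next(iter(loader))  # batch['octree'] is one merged super octree
```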
{ocnn-2.2.5 → ocnn-2.2.7}/ocnn/models/__init__.py RENAMED
@@ -1,29 +1,29 @@
- # --------------------------------------------------------
- # Octree-based Sparse Convolutional Neural Networks
- # Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
- # Licensed under The MIT License [see LICENSE for details]
- # Written by Peng-Shuai Wang
- # --------------------------------------------------------
-
- from .lenet import LeNet
- from .resnet import ResNet
- from .segnet import SegNet
- from .unet import UNet
- from .hrnet import HRNet
- from .autoencoder import AutoEncoder
- from .ounet import OUNet
- from .image2shape import Image2Shape
-
-
- __all__ = [
-     'LeNet',
-     'ResNet',
-     'SegNet',
-     'UNet',
-     'HRNet',
-     'AutoEncoder',
-     'OUNet',
-     'Image2Shape',
- ]
-
- classes = __all__
+ # --------------------------------------------------------
+ # Octree-based Sparse Convolutional Neural Networks
+ # Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Peng-Shuai Wang
+ # --------------------------------------------------------
+
+ from .lenet import LeNet
+ from .resnet import ResNet
+ from .segnet import SegNet
+ from .unet import UNet
+ from .hrnet import HRNet
+ from .autoencoder import AutoEncoder
+ from .ounet import OUNet
+ from .image2shape import Image2Shape
+
+
+ __all__ = [
+     'LeNet',
+     'ResNet',
+     'SegNet',
+     'UNet',
+     'HRNet',
+     'AutoEncoder',
+     'OUNet',
+     'Image2Shape',
+ ]
+
+ classes = __all__
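The classes re-exported here are ordinary `torch.nn.Module` subclasses. A sketch of instantiating one of them; the constructor arguments shown (`in_channels`, `out_channels`, `stages`) are assumptions for illustration, so check each model's own definition for its exact signature:

```python
# Enumerate and instantiate ocnn models (argument names are assumed).
import ocnn

print(ocnn.models.classes)  # mirrors __all__: ['LeNet', 'ResNet', ..., 'Image2Shape']

model = ocnn.models.LeNet(in_channels=3, out_channels=40, stages=3)
print(sum(p.numel() for p in model.parameters()))  # total parameter count
```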