py4dgeo 0.6.0__cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _py4dgeo.cp312-win_amd64.pyd +0 -0
- py4dgeo/UpdateableZipFile.py +81 -0
- py4dgeo/__init__.py +28 -0
- py4dgeo/cloudcompare.py +34 -0
- py4dgeo/epoch.py +745 -0
- py4dgeo/fallback.py +159 -0
- py4dgeo/logger.py +77 -0
- py4dgeo/m3c2.py +224 -0
- py4dgeo/m3c2ep.py +853 -0
- py4dgeo/pbm3c2.py +3865 -0
- py4dgeo/py4dgeo_python.cpp +401 -0
- py4dgeo/registration.py +104 -0
- py4dgeo/segmentation.py +1202 -0
- py4dgeo/util.py +263 -0
- py4dgeo-0.6.0.dist-info/METADATA +203 -0
- py4dgeo-0.6.0.dist-info/RECORD +20 -0
- py4dgeo-0.6.0.dist-info/WHEEL +5 -0
- py4dgeo-0.6.0.dist-info/entry_points.txt +3 -0
- py4dgeo-0.6.0.dist-info/licenses/COPYING.md +17 -0
- py4dgeo-0.6.0.dist-info/licenses/LICENSE.md +5 -0
|
@@ -0,0 +1,401 @@
|
|
|
1
|
+
#include <pybind11/eigen.h>
|
|
2
|
+
#include <pybind11/functional.h>
|
|
3
|
+
#include <pybind11/numpy.h>
|
|
4
|
+
#include <pybind11/pybind11.h>
|
|
5
|
+
#include <pybind11/stl.h>
|
|
6
|
+
|
|
7
|
+
#ifdef PY4DGEO_WITH_OPENMP
|
|
8
|
+
#include <omp.h>
|
|
9
|
+
#endif
|
|
10
|
+
|
|
11
|
+
#include "py4dgeo/compute.hpp"
|
|
12
|
+
#include "py4dgeo/epoch.hpp"
|
|
13
|
+
#include "py4dgeo/kdtree.hpp"
|
|
14
|
+
#include "py4dgeo/py4dgeo.hpp"
|
|
15
|
+
#include "py4dgeo/pybind11_numpy_interop.hpp"
|
|
16
|
+
#include "py4dgeo/registration.hpp"
|
|
17
|
+
#include "py4dgeo/segmentation.hpp"
|
|
18
|
+
|
|
19
|
+
#include <fstream>
|
|
20
|
+
#include <sstream>
|
|
21
|
+
#include <string>
|
|
22
|
+
#include <tuple>
|
|
23
|
+
|
|
24
|
+
namespace py = pybind11;
|
|
25
|
+
|
|
26
|
+
namespace py4dgeo {

// Entry point of the _py4dgeo extension module. Registers all C++ types,
// callbacks and algorithms with Python. Note that several lambdas below
// release the GIL before calling into multi-threaded C++ code that may in
// turn call back into Python.
PYBIND11_MODULE(_py4dgeo, m)
{
  m.doc() = "Python Bindings for py4dgeo";

  // The enum class for our memory policy
  py::enum_<MemoryPolicy>(m, "MemoryPolicy", py::arithmetic())
    .value("STRICT", MemoryPolicy::STRICT)
    .value("MINIMAL", MemoryPolicy::MINIMAL)
    .value("COREPOINTS", MemoryPolicy::COREPOINTS)
    .value("RELAXED", MemoryPolicy::RELAXED)
    .export_values();

  // Register a numpy structured type for uncertainty calculation. This allows
  // us to allocate memory in C++ and expose it as a structured numpy array in
  // Python. The given names will be usable in Python.
  PYBIND11_NUMPY_DTYPE(DistanceUncertainty,
                       lodetection,
                       spread1,
                       num_samples1,
                       spread2,
                       num_samples2);

  // Also expose the DistanceUncertainty data structure in Python, so that
  // Python fallbacks can use it directly to define their result.
  py::class_<DistanceUncertainty> unc(m, "DistanceUncertainty");
  unc.def(py::init<double, double, IndexType, double, IndexType>(),
          py::arg("lodetection") = 0.0,
          py::arg("spread1") = 0.0,
          py::arg("num_samples1") = 0,
          py::arg("spread2") = 0.0,
          py::arg("num_samples2") = 0);

  // The epoch class
  py::class_<Epoch> epoch(m, "Epoch");

  // Initializing with a numpy array prevents the numpy array from being
  // garbage collected as long as the Epoch object is alive
  epoch.def(py::init<EigenPointCloudRef>(), py::keep_alive<1, 2>());

  // We can directly access the point cloud and the kdtree
  epoch.def_readwrite("cloud", &Epoch::cloud);
  epoch.def_readwrite("kdtree", &Epoch::kdtree);

  // Pickling support for the Epoch class
  epoch.def(py::pickle(
    [](const Epoch& self) {
      // Serialize into in-memory stream
      std::stringstream buf;
      self.to_stream(buf);
      return py::bytes(buf.str());
    },
    [](const py::bytes& data) {
      // Deserialize from the raw bytes produced above
      std::stringstream buf(data.cast<std::string>());
      return Epoch::from_stream(buf);
    }));

  // Expose the KDTree class
  py::class_<KDTree> kdtree(m, "KDTree", py::buffer_protocol());

  // Map __init__ to constructor
  kdtree.def(py::init<>(&KDTree::create));

  // Allow updating KDTree from a given file
  kdtree.def("load_index", [](KDTree& self, std::string filename) {
    std::ifstream stream(filename, std::ios::binary | std::ios::in);
    self.loadIndex(stream);
  });

  // Allow dumping KDTree to a file
  kdtree.def("save_index", [](const KDTree& self, std::string filename) {
    std::ofstream stream(filename, std::ios::binary | std::ios::out);
    self.saveIndex(stream);
  });

  // Allow building the KDTree structure
  kdtree.def(
    "build_tree", &KDTree::build_tree, "Trigger building the search tree");

  // Allow invalidating the KDTree structure
  kdtree.def("invalidate", &KDTree::invalidate, "Invalidate the search tree");

  // Give access to the leaf parameter that the tree has been built with
  kdtree.def("leaf_parameter",
             &KDTree::get_leaf_parameter,
             "Retrieve the leaf parameter that the tree has been built with.");

  // Add all the radius search methods
  kdtree.def(
    "radius_search",
    [](const KDTree& self, py::array_t<double> qp, double radius) {
      // Get a pointer for the query point
      auto ptr = static_cast<const double*>(qp.request().ptr);

      KDTree::RadiusSearchResult result;
      self.radius_search(ptr, radius, result);

      // Hand the index vector to Python without copying it
      return as_pyarray(std::move(result));
    },
    "Search point in given radius!");

  kdtree.def(
    "nearest_neighbors",
    [](const KDTree& self, EigenPointCloudConstRef cloud) {
      KDTree::NearestNeighborsDistanceResult result;
      self.nearest_neighbors_with_distances(cloud, result);

      // Returns a (indices, distances) tuple of numpy arrays
      return std::make_tuple(as_pyarray(std::move(result.first)),
                             as_pyarray(std::move(result.second)));
    },
    "Find nearest neighbors for all points in a cloud!");

  // Pickling support for the KDTree data structure
  kdtree.def("__getstate__", [](const KDTree&) {
    // If a user pickles KDTree itself, we end up redundantly storing
    // the point cloud itself, because the KDTree is only usable with the
    // cloud (scipy does exactly the same). We solve the problem by asking
    // users to pickle Epoch instead, which is the much cleaner solution.
    throw std::runtime_error{
      "Please pickle Epoch instead of KDTree. Otherwise unpickled KDTree does "
      "not know the point cloud."
    };
  });

  // The main distance computation function that is the main entry point of M3C2
  m.def(
    "compute_distances",
    [](EigenPointCloudConstRef corepoints,
       double scale,
       const Epoch& epoch1,
       const Epoch& epoch2,
       EigenNormalSetConstRef directions,
       double max_distance,
       double registration_error,
       const WorkingSetFinderCallback& workingsetfinder,
       const DistanceUncertaintyCalculationCallback& distancecalculator) {
      // Allocate memory for the return types
      DistanceVector distances;
      UncertaintyVector uncertainties;

      {
        // compute_distances may spawn multiple threads that may call Python
        // functions (which requires them to acquire the GIL), so we need to
        // first release the GIL on the main thread before calling
        // compute_distances
        py::gil_scoped_release release_gil;
        compute_distances(corepoints,
                          scale,
                          epoch1,
                          epoch2,
                          directions,
                          max_distance,
                          registration_error,
                          distances,
                          uncertainties,
                          workingsetfinder,
                          distancecalculator);
      }

      return std::make_tuple(as_pyarray(std::move(distances)),
                             as_pyarray(std::move(uncertainties)));
    },
    "The main M3C2 distance calculation algorithm");

  // Multiscale direction computation
  m.def("compute_multiscale_directions",
        &compute_multiscale_directions,
        "Compute M3C2 multiscale directions");

  // Callback parameter structs. These are exposed read-only so that Python
  // callbacks can inspect the parameters they are invoked with.
  py::class_<WorkingSetFinderParameters> ws_params(
    m, "WorkingSetFinderParameters");
  ws_params.def_property_readonly(
    "epoch", [](const WorkingSetFinderParameters& self) { return self.epoch; });
  ws_params.def_property_readonly(
    "radius",
    [](const WorkingSetFinderParameters& self) { return self.radius; });
  ws_params.def_property_readonly(
    "corepoint",
    [](const WorkingSetFinderParameters& self) { return self.corepoint; });
  ws_params.def_property_readonly(
    "cylinder_axis",
    [](const WorkingSetFinderParameters& self) { return self.cylinder_axis; });
  ws_params.def_property_readonly(
    "max_distance",
    [](const WorkingSetFinderParameters& self) { return self.max_distance; });

  py::class_<DistanceUncertaintyCalculationParameters> d_params(
    m, "DistanceUncertaintyCalculationParameters");
  d_params.def_property_readonly(
    "workingset1", [](const DistanceUncertaintyCalculationParameters& self) {
      return self.workingset1;
    });
  d_params.def_property_readonly(
    "workingset2", [](const DistanceUncertaintyCalculationParameters& self) {
      return self.workingset2;
    });
  d_params.def_property_readonly(
    "corepoint", [](const DistanceUncertaintyCalculationParameters& self) {
      return self.corepoint;
    });
  d_params.def_property_readonly(
    "normal", [](const DistanceUncertaintyCalculationParameters& self) {
      return self.normal;
    });
  d_params.def_property_readonly(
    "registration_error",
    [](const DistanceUncertaintyCalculationParameters& self) {
      return self.registration_error;
    });

  // The ObjectByChange class is used as the return type for spatiotemporal
  // segmentations
  py::class_<ObjectByChange> obc(m, "ObjectByChange");
  obc.def_property_readonly(
    "indices_distances",
    [](const ObjectByChange& self) { return self.indices_distances; });
  obc.def_property_readonly(
    "start_epoch", [](const ObjectByChange& self) { return self.start_epoch; });
  obc.def_property_readonly(
    "end_epoch", [](const ObjectByChange& self) { return self.end_epoch; });
  obc.def_property_readonly(
    "threshold", [](const ObjectByChange& self) { return self.threshold; });
  obc.def(py::pickle(
    [](const ObjectByChange& self) {
      // Serialize into in-memory stream
      std::stringstream buf;

      // Write indices
      std::size_t size = self.indices_distances.size();
      buf.write(reinterpret_cast<const char*>(&size), sizeof(std::size_t));
      for (auto p : self.indices_distances)
        buf.write(reinterpret_cast<const char*>(&p),
                  sizeof(std::pair<IndexType, double>));

      // Write other data
      buf.write(reinterpret_cast<const char*>(&self.start_epoch),
                sizeof(IndexType));
      buf.write(reinterpret_cast<const char*>(&self.end_epoch),
                sizeof(IndexType));
      buf.write(reinterpret_cast<const char*>(&self.threshold), sizeof(double));
      return py::bytes(buf.str());
    },
    [](const py::bytes& data) {
      // Deserialize in the exact order the serializer above wrote the data
      std::stringstream buf(data.cast<std::string>());
      ObjectByChange obj;

      std::size_t size;
      buf.read(reinterpret_cast<char*>(&size), sizeof(std::size_t));
      std::pair<IndexType, double> buffer;
      for (std::size_t i = 0; i < size; ++i) {
        buf.read(reinterpret_cast<char*>(&buffer),
                 sizeof(std::pair<IndexType, double>));
        obj.indices_distances.insert(buffer);
      }
      buf.read(reinterpret_cast<char*>(&obj.start_epoch), sizeof(IndexType));
      buf.read(reinterpret_cast<char*>(&obj.end_epoch), sizeof(IndexType));
      buf.read(reinterpret_cast<char*>(&obj.threshold), sizeof(double));
      return obj;
    }));

  // Seed candidates for region growing segmentation
  py::class_<RegionGrowingSeed> rgs(m, "RegionGrowingSeed");
  rgs.def(py::init<IndexType, IndexType, IndexType>(),
          py::arg("index"),
          py::arg("start_epoch"),
          py::arg("end_epoch"));
  rgs.def_property_readonly(
    "index", [](const RegionGrowingSeed& self) { return self.index; });
  rgs.def_property_readonly("start_epoch", [](const RegionGrowingSeed& self) {
    return self.start_epoch;
  });
  rgs.def_property_readonly(
    "end_epoch", [](const RegionGrowingSeed& self) { return self.end_epoch; });
  rgs.def(py::pickle(
    [](const RegionGrowingSeed& self) {
      // Serialize into in-memory stream
      std::stringstream buf;
      buf.write(reinterpret_cast<const char*>(&self.index), sizeof(IndexType));
      buf.write(reinterpret_cast<const char*>(&self.start_epoch),
                sizeof(IndexType));
      buf.write(reinterpret_cast<const char*>(&self.end_epoch),
                sizeof(IndexType));
      return py::bytes(buf.str());
    },
    [](const py::bytes& data) {
      std::stringstream buf(data.cast<std::string>());
      IndexType index, start_epoch, end_epoch;
      buf.read(reinterpret_cast<char*>(&index), sizeof(IndexType));
      buf.read(reinterpret_cast<char*>(&start_epoch), sizeof(IndexType));
      buf.read(reinterpret_cast<char*>(&end_epoch), sizeof(IndexType));
      return RegionGrowingSeed{ index, start_epoch, end_epoch };
    }));

  // Input data bundle for the region growing algorithm
  py::class_<RegionGrowingAlgorithmData> rgwd(m, "RegionGrowingAlgorithmData");
  rgwd.def(py::init<EigenSpatiotemporalArrayConstRef,
                    const Epoch&,
                    double,
                    RegionGrowingSeed,
                    std::vector<double>,
                    std::size_t,
                    std::size_t>(),
           py::arg("data"),
           py::arg("epoch"),
           py::arg("radius"),
           py::arg("seed"),
           py::arg("thresholds"),
           py::arg("min_segments"),
           py::arg("max_segments"));

  // Input data bundle for time series distance functions
  py::class_<TimeseriesDistanceFunctionData> tdfd(
    m, "TimeseriesDistanceFunctionData");
  tdfd.def(py::init<EigenTimeSeriesConstRef, EigenTimeSeriesConstRef>(),
           py::arg("ts1"),
           py::arg("ts2"));
  tdfd.def_property_readonly(
    "ts1", [](const TimeseriesDistanceFunctionData& self) { return self.ts1; });
  tdfd.def_property_readonly(
    "ts2", [](const TimeseriesDistanceFunctionData& self) { return self.ts2; });
  tdfd.def_property_readonly(
    "norm1",
    [](const TimeseriesDistanceFunctionData& self) { return self.norm1; });
  tdfd.def_property_readonly(
    "norm2",
    [](const TimeseriesDistanceFunctionData& self) { return self.norm2; });

  // Input data bundle for change point detection
  py::class_<ChangePointDetectionData> cpdd(m, "ChangePointDetectionData");
  cpdd.def(
    py::
      init<EigenTimeSeriesConstRef, IndexType, IndexType, IndexType, double>(),
    py::arg("ts"),
    py::arg("window_size"),
    py::arg("min_size"),
    py::arg("jump"),
    py::arg("penalty"));

  // Apply a 4x4 homogeneous transformation (given as a numpy array) to a
  // point cloud in-place, relative to the given reduction point rp.
  m.def("transform_pointcloud_inplace",
        [](EigenPointCloudRef cloud,
           const py::array_t<double>& t,
           EigenPointCloudConstRef rp) {
          Transformation trafo;

          // Copy the 4x4 numpy matrix element-wise into the Eigen-side type
          auto r = t.unchecked<2>();
          for (IndexType i = 0; i < 4; ++i)
            for (IndexType j = 0; j < 4; ++j)
              trafo(i, j) = r(i, j);

          transform_pointcloud_inplace(cloud, trafo, rp);
        });

  // The main algorithms for the spatiotemporal segmentations
  m.def("region_growing",
        [](const RegionGrowingAlgorithmData& data,
           const TimeseriesDistanceFunction& distance_function) {
          // The region_growing function may call Python callback functions
          py::gil_scoped_release release_gil;
          return region_growing(data, distance_function);
        });
  m.def("change_point_detection", &change_point_detection);

  // Callback implementations
  m.def("radius_workingset_finder", &radius_workingset_finder);
  m.def("cylinder_workingset_finder", &cylinder_workingset_finder);
  m.def("mean_stddev_distance", &mean_stddev_distance);
  m.def("median_iqr_distance", &median_iqr_distance);
  m.def("dtw_distance", &dtw_distance);
  m.def("normalized_dtw_distance", &normalized_dtw_distance);

  // Expose OpenMP threading control
#ifdef PY4DGEO_WITH_OPENMP
  m.def("omp_set_num_threads", &omp_set_num_threads);
  m.def("omp_get_max_threads", &omp_get_max_threads);
#endif
}

} // namespace py4dgeo
|
py4dgeo/registration.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import dataclasses
|
|
2
|
+
import numpy as np
|
|
3
|
+
|
|
4
|
+
import _py4dgeo
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclasses.dataclass(frozen=True)
class Transformation:
    """A transformation that can be applied to a point cloud"""

    # 4x4 homogeneous transformation matrix (rotation + translation)
    affine_transformation: np.ndarray
    # Translation applied before rotation/scaling to improve numerical accuracy
    reduction_point: np.ndarray
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _fit_transform(A, B, reduction_point=None):
|
|
16
|
+
"""Find a transformation that fits two point clouds onto each other"""
|
|
17
|
+
|
|
18
|
+
assert A.shape == B.shape
|
|
19
|
+
|
|
20
|
+
# get number of dimensions
|
|
21
|
+
m = A.shape[1]
|
|
22
|
+
|
|
23
|
+
centroid_A = np.mean(A, axis=0)
|
|
24
|
+
centroid_B = np.mean(B, axis=0)
|
|
25
|
+
|
|
26
|
+
# Apply the reduction_point if provided
|
|
27
|
+
if reduction_point is not None:
|
|
28
|
+
centroid_A -= reduction_point
|
|
29
|
+
centroid_B -= reduction_point
|
|
30
|
+
|
|
31
|
+
AA = A - centroid_A
|
|
32
|
+
BB = B - centroid_B
|
|
33
|
+
|
|
34
|
+
H = np.dot(AA.T, BB)
|
|
35
|
+
U, _, Vt = np.linalg.svd(H)
|
|
36
|
+
R = np.dot(Vt.T, U.T)
|
|
37
|
+
t = centroid_B.T - np.dot(R, centroid_A.T)
|
|
38
|
+
# special reflection case
|
|
39
|
+
if np.linalg.det(R) < 0:
|
|
40
|
+
Vt[2, :] *= -1
|
|
41
|
+
R = np.dot(Vt.T, U.T)
|
|
42
|
+
|
|
43
|
+
# homogeneous transformation
|
|
44
|
+
T = np.identity(4)
|
|
45
|
+
T[:3, :3] = R
|
|
46
|
+
T[:3, 3] = t
|
|
47
|
+
return T
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def iterative_closest_point(
    reference_epoch, epoch, max_iterations=50, tolerance=0.00001, reduction_point=None
):
    """Perform an Iterative Closest Point algorithm (ICP)

    Iteratively matches each point of ``epoch`` to its nearest neighbor in
    ``reference_epoch``, fits a rigid transformation between the matched
    pairs and applies it, until the mean matching error converges or the
    iteration limit is reached.

    :param reference_epoch:
        The reference epoch to match with.
    :type reference_epoch: py4dgeo.Epoch
    :param epoch:
        The epoch to be transformed to the reference epoch
    :type epoch: py4dgeo.Epoch
    :param max_iterations:
        The maximum number of iterations to be performed in the ICP algorithm
    :type max_iterations: int
    :param tolerance:
        The tolerance criterium used to terminate ICP iteration.
    :type tolerance: float
    :param reduction_point:
        A translation vector to apply before applying rotation and scaling.
        This is used to increase the numerical accuracy of transformation.
    :type reduction_point: np.ndarray
    :return: the resulting Transformation (affine matrix + reduction point)
    """

    # Ensure that reference_epoch has its KDTree built
    if reference_epoch.kdtree.leaf_parameter() == 0:
        reference_epoch.build_kdtree()

    # Apply the default for the registration point
    if reduction_point is None:
        reduction_point = np.array([0, 0, 0])

    # Make a copy of the cloud to be transformed.
    cloud = epoch.cloud.copy()

    # Mean nearest-neighbor error of the previous iteration (convergence check)
    prev_error = 0

    for _ in range(max_iterations):
        # Match every (already transformed) point to its nearest reference point
        indices, distances = reference_epoch.kdtree.nearest_neighbors(cloud)
        # Calculate a transform and apply it

        T = _fit_transform(
            cloud, reference_epoch.cloud[indices, :], reduction_point=reduction_point
        )
        _py4dgeo.transform_pointcloud_inplace(cloud, T, reduction_point)

        # Determine convergence
        # distances are squared nearest-neighbor distances, hence the sqrt —
        # TODO confirm against the KDTree implementation
        mean_error = np.mean(np.sqrt(distances))
        if np.abs(prev_error - mean_error) < tolerance:
            break
        prev_error = mean_error

    # Express the total transformation as a single fit between the original
    # and the final (iteratively transformed) cloud.
    # NOTE(review): this final fit does not pass reduction_point, unlike the
    # per-iteration fits above — verify this is intentional.
    return Transformation(
        affine_transformation=_fit_transform(epoch.cloud, cloud),
        reduction_point=reduction_point,
    )
|