py4dgeo 1.0.0__cp314-cp314t-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py4dgeo/m3c2ep.py ADDED
@@ -0,0 +1,856 @@
+ import numpy as np
+ import py4dgeo
+ import math
+ import time
+ import scipy.stats as sstats
+ import multiprocessing as mp
+ import laspy
+
+ from py4dgeo.epoch import Epoch
+ from py4dgeo.util import (
+     as_double_precision,
+     Py4DGeoError,
+ )
+
+ from py4dgeo import M3C2
+
+ import warnings
+
+ warnings.filterwarnings("ignore")
+
+ try:
+     from tqdm import tqdm
+ except ImportError:
+     tqdm = None
+
+ default_tfM = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
+
+
+ class M3C2EP(M3C2):
+     def __init__(
+         self,
+         tfM: np.ndarray = default_tfM,
+         Cxx: np.ndarray = np.zeros((12, 12)),
+         refPointMov: np.ndarray = np.array([0, 0, 0]),
+         perform_trans: bool = True,
+         **kwargs,
+     ):
+         """An M3C2-EP implementation
+         that pushes the limits of 3D topographic point cloud change detection by error propagation.
+         The algorithm needs an alignment covariance matrix of shape 12 x 12, an affine transformation matrix
+         of shape 3 x 4 and a reduction point (x0, y0, z0) (the rotation origin, 3 parameters), all obtained
+         from aligning the two point clouds. The formula of the transformation is given in the user docs.
+         The transformation is controlled by the boolean flag 'perform_trans' and is performed by default.
+         """
+         assert tfM.shape == (3, 4)
+         assert refPointMov.shape == (3,)
+         assert Cxx.shape == (12, 12)
+
+         self.tfM = tfM
+         self.Cxx = Cxx
+         self.refPointMov = refPointMov
+         self.perform_trans = perform_trans
+         super().__init__(**kwargs)
+
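For reference, the alignment controlled by `perform_trans` (applied to the second epoch inside `calculate_distances` below) is an affine transform around the reduction point. A standalone restatement as a sketch; the helper name `apply_alignment` is ours and not part of this module:

import numpy as np

def apply_alignment(points, tfM, refPointMov):
    # p' = R @ (p - x0) + t + x0, with R = tfM[:, :3], t = tfM[:, 3], x0 = refPointMov
    R = tfM[:, :3]
    t = tfM[:, 3]
    return (R @ (points - refPointMov).T).T + t + refPointMov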
+     def calculate_distances(self, epoch1, epoch2):
+         """Calculate the distances between two epochs"""
+         print(self.name + " running")
+
+         if not isinstance(self.cyl_radius, float):
+             raise Py4DGeoError(
+                 f"{self.name} requires exactly one cylinder radius to be given"
+             )
+
+         # Ensure appropriate trees are built
+         epoch1._validate_search_tree()
+         epoch2._validate_search_tree()
+
+         p1_coords = epoch1.cloud
+         p1_positions = epoch1.scanpos_id
+         p2_coords = epoch2.cloud
+         p2_positions = epoch2.scanpos_id
+
+         # set default M3C2Meta
+         M3C2Meta = {"searchrad": 0.5, "maxdist": 3, "minneigh": 5, "maxneigh": 100000}
+         M3C2Meta["searchrad"] = self.cyl_radius
+         M3C2Meta["maxdist"] = self.max_distance
+
+         M3C2Meta["spInfos"] = [epoch1.scanpos_info, epoch2.scanpos_info]
+         M3C2Meta["tfM"] = self.tfM
+         M3C2Meta["Cxx"] = self.Cxx
+         M3C2Meta["redPoint"] = self.refPointMov
+
+         refPointMov = self.refPointMov
+         tfM = self.tfM
+
+         # transform p2
+         if self.perform_trans:
+             p2_coords = p2_coords - refPointMov
+             p2_coords = np.dot(tfM[:3, :3], p2_coords.T).T + tfM[:, 3] + refPointMov
+
+         # load query points
+         query_coords = self.corepoints
+         query_norms = self.directions()
+
+         if query_norms is None:
+             raise Py4DGeoError("Core point point cloud needs normals set. Exiting.")
+
+         # Repeat normals to shape of corepoints when the user explicitly provided one direction
+         if query_norms.shape[0] == 1:
+             query_norms = query_norms.repeat(self.corepoints.shape[0], axis=0)
+
+         subsample = False
+         if subsample:
+             sub_idx = np.random.choice(np.arange(0, query_coords.shape[0]), 2000)
+             query_coords = query_coords[sub_idx]
+             query_norms = query_norms[sub_idx]
+
+         NUM_THREADS = 4
+         NUM_BLOCKS = 16
+
+         query_coords_subs = np.array_split(query_coords, NUM_BLOCKS)
+         query_norms_subs = np.array_split(query_norms, NUM_BLOCKS)
+
+         # start mp
+         manager = mp.Manager()
+         return_dict = manager.dict()
+
+         # prepare shared memory
+         p1_coords_shm = mp.shared_memory.SharedMemory(
+             create=True, size=p1_coords.nbytes
+         )
+         p1_coords_sha = np.ndarray(
+             p1_coords.shape, dtype=p1_coords.dtype, buffer=p1_coords_shm.buf
+         )
+         p1_coords_sha[:] = p1_coords[:]
+         p2_coords_shm = mp.shared_memory.SharedMemory(
+             create=True, size=p2_coords.nbytes
+         )
+         p2_coords_sha = np.ndarray(
+             p2_coords.shape, dtype=p2_coords.dtype, buffer=p2_coords_shm.buf
+         )
+         p2_coords_sha[:] = p2_coords[:]
+
+         max_dist = M3C2Meta["maxdist"]
+         search_radius = M3C2Meta["searchrad"]
+         effective_search_radius = math.hypot(max_dist, search_radius)
+
+         # Querying neighbours
+         pbarQueue = mp.Queue()
+         pbarProc = mp.Process(
+             target=updatePbar, args=(query_coords.shape[0], pbarQueue, NUM_THREADS)
+         )
+         pbarProc.start()
+         procs = []
+
+         last_started_idx = -1
+         running_ps = []
+         while True:
+             if len(running_ps) < NUM_THREADS:
+                 last_started_idx += 1
+                 if last_started_idx < len(query_coords_subs):
+                     curr_subs = query_coords_subs[last_started_idx]
+                     p1_idx = radius_search(epoch1, curr_subs, effective_search_radius)
+                     p2_idx = radius_search(epoch2, curr_subs, effective_search_radius)
+
+                     p = mp.Process(
+                         target=process_corepoint_list,
+                         args=(
+                             curr_subs,
+                             query_norms_subs[last_started_idx],
+                             p1_idx,
+                             p1_coords_shm.name,
+                             p1_coords.shape,
+                             p1_positions,
+                             p2_idx,
+                             p2_coords_shm.name,
+                             p2_coords.shape,
+                             p2_positions,
+                             M3C2Meta,
+                             last_started_idx,
+                             return_dict,
+                             pbarQueue,
+                         ),
+                     )
+                     procs.append(p)
+
+                     procs[last_started_idx].start()
+                     running_ps.append(last_started_idx)
+                 else:
+                     break
+             for running_p in running_ps:
+                 if not procs[running_p].is_alive():
+                     running_ps.remove(running_p)
+             time.sleep(1)
+
+         for p in procs:
+             p.join()
+
+         pbarQueue.put((0, 0))
+         pbarProc.terminate()
+
+         p1_coords_shm.close()
+         p1_coords_shm.unlink()
+         p2_coords_shm.close()
+         p2_coords_shm.unlink()
+
+         out_attrs = {
+             key: (
+                 np.empty((query_coords.shape[0], 3, 3), dtype=val.dtype)
+                 if key == "m3c2_cov1" or key == "m3c2_cov2"
+                 else np.empty(query_coords.shape[0], dtype=val.dtype)
+             )
+             for key, val in return_dict[0].items()
+         }
+         for key in out_attrs:
+             curr_start = 0
+             for i in range(NUM_BLOCKS):
+                 curr_len = return_dict[i][key].shape[0]
+                 out_attrs[key][curr_start : curr_start + curr_len] = return_dict[i][key]
+                 curr_start += curr_len
+
+         distances = out_attrs["val"]
+         cov1 = out_attrs["m3c2_cov1"]
+         cov2 = out_attrs["m3c2_cov2"]
+         unc = {
+             "lodetection": out_attrs["lod_new"],
+             "spread1": out_attrs["m3c2_spread1"],
+             "num_samples1": out_attrs["m3c2_n1"],
+             "spread2": out_attrs["m3c2_spread2"],
+             "num_samples2": out_attrs["m3c2_n2"],
+         }
+
+         unc_list = []
+         cov_list = []
+         for i in range(unc["lodetection"].shape[0]):
+             unc_item = (
+                 unc["lodetection"][i],
+                 unc["spread1"][i],
+                 unc["num_samples1"][i],
+                 unc["spread2"][i],
+                 unc["num_samples2"][i],
+             )
+             unc_list.append(unc_item)
+             cov_item = (cov1[i], cov2[i])
+             cov_list.append(cov_item)
+
+         uncertainties = np.array(
+             unc_list,
+             dtype=[
+                 ("lodetection", "f8"),
+                 ("spread1", "f8"),
+                 ("num_samples1", "i8"),
+                 ("spread2", "f8"),
+                 ("num_samples2", "i8"),
+             ],
+         )
+         covariance = np.array(
+             cov_list, dtype=[("cov1", "f8", (3, 3)), ("cov2", "f8", (3, 3))]
+         )
+         print(self.name + " end")
+         return distances, uncertainties, covariance
+
+     @property
+     def name(self):
+         return "M3C2EP"
+
+
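A minimal usage sketch (not part of the released file): it assumes two epochs that already carry per-point scan position ids and scan position metadata, an array of core points, and alignment results Cxx, tfM and refPointMov from a prior co-registration. Keyword arguments such as epochs, corepoints, normal_radii, cyl_radius and max_distance are handled by the base M3C2 class and are assumptions here, not defined in this file.

from py4dgeo.m3c2ep import M3C2EP

# epoch1, epoch2, corepoints, Cxx, tfM, refPointMov are assumed to exist (see above)
m3c2ep = M3C2EP(
    epochs=(epoch1, epoch2),
    corepoints=corepoints,
    normal_radii=(0.5, 1.0, 2.0),
    cyl_radius=0.5,
    max_distance=3.0,
    Cxx=Cxx,
    tfM=tfM,
    refPointMov=refPointMov,
)
distances, uncertainties, covariance = m3c2ep.calculate_distances(epoch1, epoch2)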
+ def updatePbar(total, queue, maxProc):
+     desc = "Processing core points"
+     pCount = 0
+     if tqdm is None:
+         pbar = None
+     else:
+         pbar = tqdm(
+             total=total,
+             ncols=100,
+             desc=desc + " (%02d/%02d Process(es))" % (pCount, maxProc),
+         )
+
+     while True:
+         inc, process = queue.get()
+         if pbar is not None:
+             pbar.update(inc)
+             if process != 0:
+                 pCount += process
+                 pbar.set_description(
+                     desc + " (%02d/%02d Process(es))" % (pCount, maxProc)
+                 )
+
+
+ eijk = np.zeros((3, 3, 3))
+ eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
+ eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1
+
+ dij = np.zeros((3, 3))
+ dij[0, 0] = dij[1, 1] = dij[2, 2] = 1
+
+ n = np.zeros((3,))
+ poa_pts = np.zeros((3, 100))
+ path_opt = np.einsum_path(
+     "mi, ijk, j, kn -> mn", dij, eijk, n, poa_pts, optimize="optimal"
+ )
+
+
+ def getAlongAcrossSqBatch(pts, poa, n):
+     pts_poa = pts - poa[:, np.newaxis]
+     alongs = n.dot(pts_poa)
+     poa_pts = poa[:, np.newaxis] - pts
+     crosses = np.einsum(
+         "mi, ijk, j, kn -> mn", dij, eijk, n, poa_pts, optimize=path_opt[0]
+     )
+     across2 = np.einsum("ij, ij -> j", crosses, crosses)
+     return (alongs, across2)
+
+
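To unpack the einsum above: with dij the identity and eijk the Levi-Civita tensor, crosses stacks the cross products n x (poa - p), so across2 is the squared perpendicular distance of each point from the cylinder axis through poa. A plain per-point restatement, assuming n is a unit normal (the helper name is ours, not part of the module):

import numpy as np

def along_across_sq_single(p, poa, n):
    d = p - poa
    along = np.dot(n, d)                 # signed distance along the cylinder axis
    across_sq = np.dot(d, d) - along**2  # equals |n x (poa - p)|^2 for a unit normal
    return along, across_sq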
+ def get_local_mean_and_Cxx_nocorr(
+     Cxx, tfM, origins, redPoint, sigmas, curr_pts, curr_pos, epoch, tf=True
+ ):
+     nPts = curr_pts.shape[0]
+     A = np.tile(np.eye(3), (nPts, 1))
+     ATP = np.zeros((3, 3 * nPts))
+     tfM = tfM if tf else np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
+     dx = np.zeros((nPts,), dtype=np.float64)
+     dy = np.zeros((nPts,), dtype=np.float64)
+     dz = np.zeros((nPts,), dtype=np.float64)
+     rrange = np.zeros((nPts,), dtype=np.float64)
+     sinscan = np.zeros((nPts,), dtype=np.float64)
+     cosscan = np.zeros((nPts,), dtype=np.float64)
+     cosyaw = np.zeros((nPts,), dtype=np.float64)
+     sinyaw = np.zeros((nPts,), dtype=np.float64)
+     sigmaRange = np.zeros((nPts,), dtype=np.float64)
+     sigmaYaw = np.zeros((nPts,), dtype=np.float64)
+     sigmaScan = np.zeros((nPts,), dtype=np.float64)
+
+     for scanPosId in np.unique(curr_pos):
+         scanPos = np.array(origins[scanPosId - 1, :])
+         scanPosPtsIdx = curr_pos == scanPosId
+
+         dd = curr_pts[scanPosPtsIdx, :] - scanPos[np.newaxis, :]
+         dlx, dly, dlz = dd[:, 0], dd[:, 1], dd[:, 2]
+         yaw = np.arctan2(dly, dlx)
+         planar_dist = np.hypot(dlx, dly)
+         scan = np.pi / 2 - np.arctan(dlz / planar_dist)
+         rrange[scanPosPtsIdx] = np.hypot(planar_dist, dlz)
+         sinscan[scanPosPtsIdx] = np.sin(scan)
+         cosscan[scanPosPtsIdx] = np.cos(scan)
+         sinyaw[scanPosPtsIdx] = np.sin(yaw)
+         cosyaw[scanPosPtsIdx] = np.cos(yaw)
+
+         dr = curr_pts[scanPosPtsIdx, :] - redPoint
+         dx[scanPosPtsIdx] = dr[:, 0]
+         dy[scanPosPtsIdx] = dr[:, 1]
+         dz[scanPosPtsIdx] = dr[:, 2]
+
+         sigmaRange[scanPosPtsIdx] = np.array(
+             np.sqrt(
+                 sigmas[scanPosId - 1][0] ** 2
+                 + sigmas[scanPosId - 1][1] * 1e-6 * rrange[scanPosPtsIdx] ** 2
+             )
+         )  # a + b*d
+         sigmaYaw[scanPosPtsIdx] = np.array(sigmas[scanPosId - 1][2])
+         sigmaScan[scanPosPtsIdx] = np.array(sigmas[scanPosId - 1][3])
+
+     if tf:
+         SigmaXiXj = (
+             dx**2 * Cxx[0, 0]
+             + 2 * dx * dy * Cxx[0, 1]  # a11a11
+             + dy**2 * Cxx[1, 1]  # a11a12
+             + 2 * dy * dz * Cxx[1, 2]  # a12a12
+             + dz**2 * Cxx[2, 2]  # a12a13
+             + 2 * dz * dx * Cxx[0, 2]  # a13a13
+             + 2  # a11a13
+             * (dx * Cxx[0, 9] + dy * Cxx[1, 9] + dz * Cxx[2, 9])  # a11tx  # a12tx
+             + Cxx[9, 9]  # a13tx
+         )  # txtx
+
+         SigmaYiYj = (
+             dx**2 * Cxx[3, 3]
+             + 2 * dx * dy * Cxx[3, 4]  # a21a21
+             + dy**2 * Cxx[4, 4]  # a21a22
+             + 2 * dy * dz * Cxx[4, 5]  # a22a22
+             + dz**2 * Cxx[5, 5]  # a22a23
+             + 2 * dz * dx * Cxx[3, 5]  # a23a23
+             + 2  # a21a23
+             * (dx * Cxx[3, 10] + dy * Cxx[4, 10] + dz * Cxx[5, 10])  # a21ty  # a22ty
+             + Cxx[10, 10]  # a23ty
+         )  # tyty
+
+         SigmaZiZj = (
+             dx**2 * Cxx[6, 6]
+             + 2 * dx * dy * Cxx[6, 7]  # a31a31
+             + dy**2 * Cxx[7, 7]  # a31a32
+             + 2 * dy * dz * Cxx[7, 8]  # a32a32
+             + dz**2 * Cxx[8, 8]  # a32a33
+             + 2 * dz * dx * Cxx[6, 8]  # a33a33
+             + 2  # a31a33
+             * (dx * Cxx[6, 11] + dy * Cxx[7, 11] + dz * Cxx[8, 11])  # a31tz  # a32tz
+             + Cxx[11, 11]  # a33tz
+         )  # tztz
+
+         SigmaXiYj = (
+             Cxx[9, 10]
+             + dx * Cxx[0, 10]  # txty
+             + dy * Cxx[1, 10]  # a11ty
+             + dz * Cxx[2, 10]  # a12ty
+             + dx  # a13ty
+             * (Cxx[3, 9] + Cxx[0, 3] * dx + Cxx[1, 3] * dy + Cxx[2, 3] * dz)
+             + dy * (Cxx[4, 9] + Cxx[0, 4] * dx + Cxx[1, 4] * dy + Cxx[2, 4] * dz)
+             + dz * (Cxx[5, 9] + Cxx[0, 5] * dx + Cxx[1, 5] * dy + Cxx[2, 5] * dz)
+         )
+
+         SigmaXiZj = (
+             Cxx[9, 11]
+             + dx * Cxx[0, 11]  # txtz
+             + dy * Cxx[1, 11]  # a11tz
+             + dz * Cxx[2, 11]  # a12tz
+             + dx  # a13tz
+             * (Cxx[6, 9] + Cxx[0, 6] * dx + Cxx[1, 6] * dy + Cxx[2, 6] * dz)
+             + dy * (Cxx[7, 9] + Cxx[0, 7] * dx + Cxx[1, 7] * dy + Cxx[2, 7] * dz)
+             + dz * (Cxx[8, 9] + Cxx[0, 8] * dx + Cxx[1, 8] * dy + Cxx[2, 8] * dz)
+         )
+
+         SigmaYiZj = (
+             Cxx[10, 11]
+             + dx * Cxx[6, 10]  # tytz
+             + dy * Cxx[7, 10]  # a21tx
+             + dz * Cxx[8, 10]  # a22tx
+             + dx  # a23tx
+             * (Cxx[3, 11] + Cxx[3, 6] * dx + Cxx[3, 7] * dy + Cxx[3, 8] * dz)
+             + dy * (Cxx[4, 11] + Cxx[4, 6] * dx + Cxx[4, 7] * dy + Cxx[4, 8] * dz)
+             + dz * (Cxx[5, 11] + Cxx[5, 6] * dx + Cxx[5, 7] * dy + Cxx[5, 8] * dz)
+         )
+         C11 = np.sum(SigmaXiXj)  # sum over all j
+         C12 = np.sum(SigmaXiYj)  # sum over all j
+         C13 = np.sum(SigmaXiZj)  # sum over all j
+         C22 = np.sum(SigmaYiYj)  # sum over all j
+         C23 = np.sum(SigmaYiZj)  # sum over all j
+         C33 = np.sum(SigmaZiZj)  # sum over all j
+         local_Cxx = np.array([[C11, C12, C13], [C12, C22, C23], [C13, C23, C33]])
+     else:
+         local_Cxx = np.zeros((3, 3))
+
+     C11p = (
+         (
+             tfM[0, 0] * cosyaw * sinscan
+             + tfM[0, 1] * sinyaw * sinscan  # dX/dRange - measurements
+             + tfM[0, 2] * cosscan
+         )
+         ** 2
+         * sigmaRange**2
+         + (
+             -1 * tfM[0, 0] * rrange * sinyaw * sinscan
+             + tfM[0, 1] * rrange * cosyaw * sinscan  # dX/dYaw
+         )
+         ** 2
+         * sigmaYaw**2
+         + (
+             tfM[0, 0] * rrange * cosyaw * cosscan
+             + tfM[0, 1] * rrange * sinyaw * cosscan  # dX/dScan
+             + -1 * tfM[0, 2] * rrange * sinscan
+         )
+         ** 2
+         * sigmaScan**2
+     )
+
+     C12p = (
+         (
+             tfM[1, 0] * cosyaw * sinscan
+             + tfM[1, 1] * sinyaw * sinscan  # dY/dRange - measurements
+             + tfM[1, 2] * cosscan
+         )
+         * (
+             tfM[0, 0] * cosyaw * sinscan
+             + tfM[0, 1] * sinyaw * sinscan  # dX/dRange - measurements
+             + tfM[0, 2] * cosscan
+         )
+         * sigmaRange**2
+         + (
+             -1 * tfM[1, 0] * rrange * sinyaw * sinscan
+             + tfM[1, 1] * rrange * cosyaw * sinscan  # dY/dYaw
+         )
+         * (
+             -1 * tfM[0, 0] * rrange * sinyaw * sinscan
+             + tfM[0, 1] * rrange * cosyaw * sinscan  # dX/dYaw
+         )
+         * sigmaYaw**2
+         + (
+             tfM[0, 0] * rrange * cosyaw * cosscan
+             + tfM[0, 1] * rrange * sinyaw * cosscan  # dX/dScan
+             + -1 * tfM[0, 2] * rrange * sinscan
+         )
+         * (
+             tfM[1, 0] * rrange * cosyaw * cosscan
+             + tfM[1, 1] * rrange * sinyaw * cosscan  # dY/dScan
+             + -1 * tfM[1, 2] * rrange * sinscan
+         )
+         * sigmaScan**2
+     )
+
+     C22p = (
+         (
+             tfM[1, 0] * cosyaw * sinscan
+             + tfM[1, 1] * sinyaw * sinscan  # dY/dRange - measurements
+             + tfM[1, 2] * cosscan
+         )
+         ** 2
+         * sigmaRange**2
+         + (
+             -1 * tfM[1, 0] * rrange * sinyaw * sinscan
+             + tfM[1, 1] * rrange * cosyaw * sinscan  # dY/dYaw
+         )
+         ** 2
+         * sigmaYaw**2
+         + (
+             tfM[1, 0] * rrange * cosyaw * cosscan
+             + tfM[1, 1] * rrange * sinyaw * cosscan  # dY/dScan
+             + -1 * tfM[1, 2] * rrange * sinscan
+         )
+         ** 2
+         * sigmaScan**2
+     )
+
+     C23p = (
+         (
+             tfM[1, 0] * cosyaw * sinscan
+             + tfM[1, 1] * sinyaw * sinscan  # dY/dRange - measurements
+             + tfM[1, 2] * cosscan
+         )
+         * (
+             tfM[2, 0] * cosyaw * sinscan
+             + tfM[2, 1] * sinyaw * sinscan  # dZ/dRange - measurements
+             + tfM[2, 2] * cosscan
+         )
+         * sigmaRange**2
+         + (
+             -1 * tfM[1, 0] * rrange * sinyaw * sinscan
+             + tfM[1, 1] * rrange * cosyaw * sinscan  # dY/dYaw
+         )
+         * (
+             -1 * tfM[2, 0] * rrange * sinyaw * sinscan
+             + tfM[2, 1] * rrange * cosyaw * sinscan  # dZ/dYaw
+         )
+         * sigmaYaw**2
+         + (
+             tfM[2, 0] * rrange * cosyaw * cosscan
+             + tfM[2, 1] * rrange * sinyaw * cosscan  # dZ/dScan
+             + -1 * tfM[2, 2] * rrange * sinscan
+         )
+         * (
+             tfM[1, 0] * rrange * cosyaw * cosscan
+             + tfM[1, 1] * rrange * sinyaw * cosscan  # dY/dScan
+             + -1 * tfM[1, 2] * rrange * sinscan
+         )
+         * sigmaScan**2
+     )
+
+     C33p = (
+         (
+             tfM[2, 0] * cosyaw * sinscan
+             + tfM[2, 1] * sinyaw * sinscan  # dZ/dRange - measurements
+             + tfM[2, 2] * cosscan
+         )
+         ** 2
+         * sigmaRange**2
+         + (
+             -1 * tfM[2, 0] * rrange * sinyaw * sinscan
+             + tfM[2, 1] * rrange * cosyaw * sinscan  # dZ/dYaw
+         )
+         ** 2
+         * sigmaYaw**2
+         + (
+             tfM[2, 0] * rrange * cosyaw * cosscan
+             + tfM[2, 1] * rrange * sinyaw * cosscan  # dZ/dScan
+             + -1 * tfM[2, 2] * rrange * sinscan
+         )
+         ** 2
+         * sigmaScan**2
+     )
+
+     C13p = (
+         (
+             tfM[2, 0] * cosyaw * sinscan
+             + tfM[2, 1] * sinyaw * sinscan  # dZ/dRange - measurements
+             + tfM[2, 2] * cosscan
+         )
+         * (
+             tfM[0, 0] * cosyaw * sinscan
+             + tfM[0, 1] * sinyaw * sinscan  # dX/dRange - measurements
+             + tfM[0, 2] * cosscan
+         )
+         * sigmaRange**2
+         + (
+             -1 * tfM[2, 0] * rrange * sinyaw * sinscan
+             + tfM[2, 1] * rrange * cosyaw * sinscan  # dZ/dYaw
+         )
+         * (
+             -1 * tfM[0, 0] * rrange * sinyaw * sinscan
+             + tfM[0, 1] * rrange * cosyaw * sinscan  # dX/dYaw
+         )
+         * sigmaYaw**2
+         + (
+             tfM[2, 0] * rrange * cosyaw * cosscan
+             + tfM[2, 1] * rrange * sinyaw * cosscan  # dZ/dScan
+             + -1 * tfM[2, 2] * rrange * sinscan
+         )
+         * (
+             tfM[0, 0] * rrange * cosyaw * cosscan
+             + tfM[0, 1] * rrange * sinyaw * cosscan  # dX/dScan
+             + -1 * tfM[0, 2] * rrange * sinscan
+         )
+         * sigmaScan**2
+     )
+     local_Cxx[0, 0] += np.sum(C11p)
+     local_Cxx[0, 1] += np.sum(C12p)
+     local_Cxx[0, 2] += np.sum(C13p)
+     local_Cxx[1, 0] += np.sum(C12p)
+     local_Cxx[1, 1] += np.sum(C22p)
+     local_Cxx[1, 2] += np.sum(C23p)
+     local_Cxx[2, 1] += np.sum(C23p)
+     local_Cxx[2, 0] += np.sum(C13p)
+     local_Cxx[2, 2] += np.sum(C33p)
+
+     # Get mean without correlation (averages out anyway, or something...)
+     for pii in range(nPts):
+         Cxx = np.array(
+             [
+                 [C11p[pii], C12p[pii], C13p[pii]],
+                 [C12p[pii], C22p[pii], C23p[pii]],
+                 [C13p[pii], C23p[pii], C33p[pii]],
+             ]
+         )
+         if np.linalg.det(Cxx) == 0:
+             Cxx = np.eye(3)
+         Cix = np.linalg.inv(Cxx)
+         ATP[:, pii * 3 : (pii + 1) * 3] = Cix
+     N = np.dot(ATP, A)
+     Qxx = np.linalg.inv(N)  # can only have > 0 in main diagonal!
+     pts_m = curr_pts.mean(axis=0)
+     l = (curr_pts - pts_m).flatten()
+     mean = np.dot(Qxx, np.dot(ATP, l)) + pts_m
+
+     return mean, local_Cxx / nPts
+
+
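The mean returned above is a generalized least-squares mean: each neighbour is weighted by the inverse of its per-point covariance, i.e. mean = (sum_i Ci^-1)^-1 * sum_i Ci^-1 * p_i; the centering around pts_m is only for numerical stability. A compact, mathematically equivalent sketch (the helper name is ours):

import numpy as np

def weighted_mean(points, covariances):
    # points: (n, 3); covariances: (n, 3, 3) per-point covariance matrices
    weights = np.array([np.linalg.inv(C) for C in covariances])  # Ci^-1
    N = weights.sum(axis=0)
    rhs = np.einsum("nij,nj->i", weights, points)
    return np.linalg.solve(N, rhs)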
+ def process_corepoint_list(
+     corepoints,
+     corepoint_normals,
+     p1_idx,
+     p1_shm_name,
+     p1_size,
+     p1_positions,
+     p2_idx,
+     p2_shm_name,
+     p2_size,
+     p2_positions,
+     M3C2Meta,
+     idx,
+     return_dict,
+     pbarQueue,
+ ):
+     pbarQueue.put((0, 1))
+     p1_shm = mp.shared_memory.SharedMemory(name=p1_shm_name)
+     p2_shm = mp.shared_memory.SharedMemory(name=p2_shm_name)
+     p1_coords = np.ndarray(p1_size, dtype=np.float64, buffer=p1_shm.buf)
+     p2_coords = np.ndarray(p2_size, dtype=np.float64, buffer=p2_shm.buf)
+
+     max_dist = M3C2Meta["maxdist"]
+     search_radius = M3C2Meta["searchrad"]
+
+     M3C2_vals = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
+     M3C2_LoD = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
+     M3C2_N1 = np.full((corepoints.shape[0]), np.nan, dtype=np.int32)
+     M3C2_N2 = np.full((corepoints.shape[0]), np.nan, dtype=np.int32)
+
+     M3C2_spread1 = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
+     M3C2_spread2 = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
+     M3C2_cov1 = np.full((corepoints.shape[0], 3, 3), np.nan, dtype=np.float64)
+     M3C2_cov2 = np.full((corepoints.shape[0], 3, 3), np.nan, dtype=np.float64)
+
+     for cp_idx, p1_neighbours in enumerate(p1_idx):
+         n = corepoint_normals[cp_idx]
+         p1_curr_pts = p1_coords[p1_neighbours, :]
+         along1, acrossSq1 = getAlongAcrossSqBatch(p1_curr_pts.T, corepoints[cp_idx], n)
+         p1_curr_pts = p1_curr_pts[
+             np.logical_and(np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2),
+             :,
+         ]
+         p1_scanPos = p1_positions[p1_neighbours]
+         p1_scanPos = p1_scanPos[
+             np.logical_and(np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2)
+         ]
+         if p1_curr_pts.shape[0] < M3C2Meta["minneigh"]:
+             pbarQueue.put((1, 0))  # point processed
+             M3C2_N1[cp_idx] = p1_curr_pts.shape[0]
+             continue
+         elif p1_curr_pts.shape[0] > M3C2Meta["maxneigh"]:
+             p1_curr_pts = p1_curr_pts[np.argsort(acrossSq1[: M3C2Meta["maxneigh"]])]
+             p1_scanPos = p1_scanPos[np.argsort(acrossSq1[: M3C2Meta["maxneigh"]])]
+
+         Cxx = M3C2Meta["Cxx"]
+         tfM = M3C2Meta["tfM"]
+         origins = np.array([SP["origin"] for SP in M3C2Meta["spInfos"][0]])
+         redPoint = M3C2Meta["redPoint"]
+         sigmas = np.array(
+             [
+                 [
+                     SP["sigma_range"],
+                     SP["sigma_range"],
+                     SP["sigma_scan"],
+                     SP["sigma_yaw"],
+                 ]
+                 for SP in M3C2Meta["spInfos"][0]
+             ]
+         )
+
+         p1_weighted_CoG, p1_local_Cxx = get_local_mean_and_Cxx_nocorr(
+             Cxx,
+             tfM,
+             origins,
+             redPoint,
+             sigmas,
+             p1_curr_pts,
+             p1_scanPos,
+             epoch=0,
+             tf=False,
+         )  # only one dataset has been transformed
+         along1_var = np.var(
+             along1[
+                 np.logical_and(
+                     np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2
+                 )
+             ]
+         )
+
+         p2_neighbours = p2_idx[cp_idx]
+         p2_curr_pts = p2_coords[p2_neighbours, :]
+         along2, acrossSq2 = getAlongAcrossSqBatch(p2_curr_pts.T, corepoints[cp_idx], n)
+         p2_curr_pts = p2_curr_pts[
+             np.logical_and(np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2),
+             :,
+         ]
+         p2_scanPos = p2_positions[p2_neighbours]
+         p2_scanPos = p2_scanPos[
+             np.logical_and(np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2)
+         ]
+         if p2_curr_pts.shape[0] < M3C2Meta["minneigh"]:
+             pbarQueue.put((1, 0))  # point processed
+             M3C2_N2[cp_idx] = p2_curr_pts.shape[0]
+             continue
+         elif p2_curr_pts.shape[0] > M3C2Meta["maxneigh"]:
+             p2_curr_pts = p2_curr_pts[np.argsort(acrossSq2[: M3C2Meta["maxneigh"]])]
+             p2_scanPos = p2_scanPos[np.argsort(acrossSq2[: M3C2Meta["maxneigh"]])]
+
+         origins = np.array([SP["origin"] for SP in M3C2Meta["spInfos"][1]])
+         sigmas = np.array(
+             [
+                 [
+                     SP["sigma_range"],
+                     SP["sigma_range"],
+                     SP["sigma_scan"],
+                     SP["sigma_yaw"],
+                 ]
+                 for SP in M3C2Meta["spInfos"][1]
+             ]
+         )
+         p2_weighted_CoG, p2_local_Cxx = get_local_mean_and_Cxx_nocorr(
+             Cxx,
+             tfM,
+             origins,
+             redPoint,
+             sigmas,
+             p2_curr_pts,
+             p2_scanPos,
+             epoch=1,
+             tf=True,
+         )
+         along2_var = np.var(
+             along2[
+                 np.logical_and(
+                     np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2
+                 )
+             ]
+         )
+
+         p1_CoG = p1_weighted_CoG
+         p2_CoG = p2_weighted_CoG
+
+         p1_CoG_Cxx = p1_local_Cxx
+         p2_CoG_Cxx = p2_local_Cxx
+
+         p1_p2_CoG_Cxx = np.zeros((6, 6))
+         p1_p2_CoG_Cxx[0:3, 0:3] = p1_CoG_Cxx
+         p1_p2_CoG_Cxx[3:6, 3:6] = p2_CoG_Cxx
+
+         M3C2_dist = n.dot(p1_CoG - p2_CoG)
+         F = np.hstack([-n, n])
+
+         M3C2_vals[cp_idx] = M3C2_dist
+
+         N1 = p1_curr_pts.shape[0]
+         N2 = p2_curr_pts.shape[0]
+
+         sigmaD = p1_CoG_Cxx + p2_CoG_Cxx
+
+         p = 3  # three dimensional
+         Tsqalt = n.T.dot(np.linalg.inv(sigmaD)).dot(n)
+
+         M3C2_LoD[cp_idx] = np.sqrt(sstats.chi2.ppf(0.95, p) / Tsqalt)
+         M3C2_N1[cp_idx] = N1
+         M3C2_N2[cp_idx] = N2
+
+         # add M3C2 spreads
+         normal = n[np.newaxis, :]
+         M3C2_spread1[cp_idx] = np.sqrt(
+             np.matmul(np.matmul(normal, p1_CoG_Cxx), normal.T)
+         ).squeeze()
+         M3C2_spread2[cp_idx] = np.sqrt(
+             np.matmul(np.matmul(normal, p2_CoG_Cxx), normal.T)
+         ).squeeze()
+         M3C2_cov1[cp_idx] = p1_CoG_Cxx.squeeze()
+         M3C2_cov2[cp_idx] = p2_CoG_Cxx.squeeze()
+
+         pbarQueue.put((1, 0))  # point processed
+
+     return_dict[idx] = {
+         "lod_new": M3C2_LoD,
+         "val": M3C2_vals,
+         "m3c2_n1": M3C2_N1,
+         "m3c2_n2": M3C2_N2,
+         "m3c2_spread1": M3C2_spread1,
+         "m3c2_spread2": M3C2_spread2,
+         "m3c2_cov1": M3C2_cov1,
+         "m3c2_cov2": M3C2_cov2,
+     }
+     pbarQueue.put((0, -1))
+     p1_shm.close()
+     p2_shm.close()
+
+
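The level of detection computed per core point above follows LoD = sqrt(chi2_{0.95, 3} / (n^T (C1 + C2)^-1 n)), with C1 and C2 the covariance matrices of the two local means and n the core point normal. As a standalone sketch (the helper name is ours, not part of the module):

import numpy as np
import scipy.stats as sstats

def level_of_detection(n, cov1, cov2):
    sigmaD = cov1 + cov2
    tsq = n @ np.linalg.inv(sigmaD) @ n
    return np.sqrt(sstats.chi2.ppf(0.95, 3) / tsq)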
+ def radius_search(epoch: Epoch, query: np.ndarray, radius: float):
+     """Query the search tree of an epoch for neighbors within a radius
+     :param query:
+         An array of points to query.
+         Array-like of shape (n_samples, 3), or a single query point of shape (3,)
+     :type query: array
+     :param radius:
+         The search radius around each query point.
+     :type radius: float
+     """
+     if len(query.shape) == 1 and query.shape[0] == 3:
+         return [epoch._radius_search(query, radius)]
+
+     if len(query.shape) == 2 and query.shape[1] == 3:
+         neighbors = []
+         for i in range(query.shape[0]):
+             q = query[i]
+             result = epoch._radius_search(q, radius)
+             neighbors.append(result)
+         return neighbors
+
+     raise Py4DGeoError(
+         "Please ensure queries are array-like of shape (n_samples, 3)"
+         " or of shape (3,) to query 1 sample point!"
+     )
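Example call pattern for radius_search (a sketch; epoch and corepoints are assumed to exist, and the epoch's search tree must have been built, as calculate_distances does via _validate_search_tree):

epoch._validate_search_tree()
neighbor_lists = radius_search(epoch, corepoints, 1.0)  # one index array per query point
single = radius_search(epoch, corepoints[0], 1.0)       # (3,) query returns a one-element list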