py4dgeo 0.6.0__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py4dgeo/m3c2ep.py ADDED
@@ -0,0 +1,853 @@
1
+ import numpy as np
2
+ import py4dgeo
3
+ import math
4
+ import time
5
+ import scipy.stats as sstats
6
+ import multiprocessing as mp
7
+ import laspy
8
+
9
+ from py4dgeo.epoch import Epoch
10
+ from py4dgeo.util import (
11
+ as_double_precision,
12
+ Py4DGeoError,
13
+ )
14
+
15
+ from py4dgeo import M3C2
16
+
17
+ import warnings
18
+
19
# NOTE(review): this silences ALL warnings process-wide for any program that
# imports this module — quite aggressive; consider restricting to specific
# warning categories.
warnings.filterwarnings("ignore")

# tqdm is an optional dependency: progress reporting is skipped when it is
# not installed (see updatePbar below).
try:
    from tqdm import tqdm
except ImportError:
    tqdm = None

# Identity affine transformation of shape (3, 4): 3x3 rotation/scale part on
# the left, translation column on the right.
default_tfM = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
27
+
28
+
29
class M3C2EP(M3C2):
    """M3C2 change detection with error propagation (M3C2-EP)."""

    def __init__(
        self,
        tfM: np.ndarray = default_tfM,
        Cxx: np.ndarray = np.zeros((12, 12)),
        refPointMov: np.ndarray = np.array([0, 0, 0]),
        perform_trans: bool = True,
        **kwargs,
    ):
        """An M3C2-EP implementation
        that push the limits of 3D topographic point cloud change detection by error propagation.
        The algorithm needs an alignment covariance matrix of shape 12 x 12, an affine transformation matrix
        of shape 3 x 4 and a reduction point (x0,y0,z0) (rotation origin, 3 parameters) obtained from
        aligning the two point clouds. The formula of the transformation see in user docs.
        The transformation can be set by a boolean flag 'perform_trans' and is performed by default.
        """
        # NOTE(review): the default arrays are shared module-level objects;
        # callers should not mutate self.tfM / self.Cxx / self.refPointMov
        # in place.
        assert tfM.shape == (3, 4)
        assert refPointMov.shape == (3,)
        assert Cxx.shape == (12, 12)

        self.tfM = tfM
        self.Cxx = Cxx
        self.refPointMov = refPointMov
        self.perform_trans = perform_trans
        super().__init__(**kwargs)

    def calculate_distances(self, epoch1, epoch2):
        """Calculate the distances between two epochs.

        Returns a tuple ``(distances, uncertainties, covariance)`` where
        ``uncertainties`` is a structured array with fields lodetection,
        spread1/2 and num_samples1/2, and ``covariance`` holds the two 3x3
        per-epoch covariance matrices per core point.

        Raises Py4DGeoError unless exactly one cylinder radius is configured
        or if the core points carry no normals.
        """
        print(self.name + " running")

        if self.cyl_radii is None or len(self.cyl_radii) != 1:
            raise Py4DGeoError(
                f"{self.name} requires exactly one cylinder radius to be given"
            )

        epoch1.build_kdtree()
        epoch2.build_kdtree()

        p1_coords = epoch1.cloud
        p1_positions = epoch1.scanpos_id
        p2_coords = epoch2.cloud
        p2_positions = epoch2.scanpos_id

        # set default M3C2Meta, then override with configured parameters
        M3C2Meta = {"searchrad": 0.5, "maxdist": 3, "minneigh": 5, "maxneigh": 100000}
        M3C2Meta["searchrad"] = self.cyl_radii[0]
        M3C2Meta["maxdist"] = self.max_distance

        M3C2Meta["spInfos"] = [epoch1.scanpos_info, epoch2.scanpos_info]
        M3C2Meta["tfM"] = self.tfM
        M3C2Meta["Cxx"] = self.Cxx
        M3C2Meta["redPoint"] = self.refPointMov

        refPointMov = self.refPointMov
        tfM = self.tfM

        # transform p2 (rotate about the reduction point, then translate)
        if self.perform_trans:
            p2_coords = p2_coords - refPointMov
            p2_coords = np.dot(tfM[:3, :3], p2_coords.T).T + tfM[:, 3] + refPointMov

        # load query points
        query_coords = self.corepoints
        query_norms = self.directions()

        # BUGFIX: test for missing normals BEFORE touching their shape; the
        # original accessed query_norms.shape first and had an unreachable
        # exit(-1) after the raise.
        if query_norms is None:
            raise Py4DGeoError("Core point point cloud needs normals set. Exiting.")

        # Repeat normals to shape of corepoints when the user explicitly provided one direction
        if query_norms.shape[0] == 1:
            query_norms = query_norms.repeat(self.corepoints.shape[0], axis=0)

        NUM_THREADS = 4
        NUM_BLOCKS = 16

        query_coords_subs = np.array_split(query_coords, NUM_BLOCKS)
        query_norms_subs = np.array_split(query_norms, NUM_BLOCKS)

        # start mp
        manager = mp.Manager()
        return_dict = manager.dict()

        # Copy both clouds into shared memory so worker processes can map
        # them by name instead of pickling the full arrays.
        p1_coords_shm = mp.shared_memory.SharedMemory(
            create=True, size=p1_coords.nbytes
        )
        p1_coords_sha = np.ndarray(
            p1_coords.shape, dtype=p1_coords.dtype, buffer=p1_coords_shm.buf
        )
        p1_coords_sha[:] = p1_coords[:]
        p2_coords_shm = mp.shared_memory.SharedMemory(
            create=True, size=p2_coords.nbytes
        )
        p2_coords_sha = np.ndarray(
            p2_coords.shape, dtype=p2_coords.dtype, buffer=p2_coords_shm.buf
        )
        p2_coords_sha[:] = p2_coords[:]

        max_dist = M3C2Meta["maxdist"]
        search_radius = M3C2Meta["searchrad"]
        # radius of a sphere that encloses the search cylinder
        effective_search_radius = math.hypot(max_dist, search_radius)

        # Querying neighbours
        pbarQueue = mp.Queue()
        pbarProc = mp.Process(
            target=updatePbar, args=(query_coords.shape[0], pbarQueue, NUM_THREADS)
        )
        pbarProc.start()
        procs = []

        # Simple scheduler: keep at most NUM_THREADS workers alive, polling
        # once per second, until all NUM_BLOCKS chunks have been started.
        last_started_idx = -1
        running_ps = []
        while True:
            if len(running_ps) < NUM_THREADS:
                last_started_idx += 1
                if last_started_idx < len(query_coords_subs):
                    curr_subs = query_coords_subs[last_started_idx]
                    p1_idx = radius_search(epoch1, curr_subs, effective_search_radius)
                    p2_idx = radius_search(epoch2, curr_subs, effective_search_radius)

                    p = mp.Process(
                        target=process_corepoint_list,
                        args=(
                            curr_subs,
                            query_norms_subs[last_started_idx],
                            p1_idx,
                            p1_coords_shm.name,
                            p1_coords.shape,
                            p1_positions,
                            p2_idx,
                            p2_coords_shm.name,
                            p2_coords.shape,
                            p2_positions,
                            M3C2Meta,
                            last_started_idx,
                            return_dict,
                            pbarQueue,
                        ),
                    )
                    procs.append(p)

                    procs[last_started_idx].start()
                    running_ps.append(last_started_idx)
                else:
                    break
            # BUGFIX: rebuild the list instead of calling remove() while
            # iterating it, which skips elements and delays rescheduling.
            running_ps = [rp for rp in running_ps if procs[rp].is_alive()]
            time.sleep(1)

        for p in procs:
            p.join()

        pbarQueue.put((0, 0))
        pbarProc.terminate()

        p1_coords_shm.close()
        p1_coords_shm.unlink()
        p2_coords_shm.close()
        p2_coords_shm.unlink()

        # Allocate full-length output arrays based on the dtypes returned by
        # the first worker; covariance fields are (n, 3, 3), the rest (n,).
        out_attrs = {
            key: np.empty((query_coords.shape[0], 3, 3), dtype=val.dtype)
            if key in ("m3c2_cov1", "m3c2_cov2")
            else np.empty(query_coords.shape[0], dtype=val.dtype)
            for key, val in return_dict[0].items()
        }
        # Stitch the per-block results back together in block order.
        for key in out_attrs:
            curr_start = 0
            for i in range(NUM_BLOCKS):
                curr_len = return_dict[i][key].shape[0]
                out_attrs[key][curr_start : curr_start + curr_len] = return_dict[i][key]
                curr_start += curr_len

        distances = out_attrs["val"]
        cov1 = out_attrs["m3c2_cov1"]
        cov2 = out_attrs["m3c2_cov2"]
        unc = {
            "lodetection": out_attrs["lod_new"],
            "spread1": out_attrs["m3c2_spread1"],
            "num_samples1": out_attrs["m3c2_n1"],
            "spread2": out_attrs["m3c2_spread2"],
            "num_samples2": out_attrs["m3c2_n2"],
        }

        # Pack per-point tuples for the structured result arrays.
        unc_list = []
        cov_list = []
        for i in range(unc["lodetection"].shape[0]):
            unc_list.append(
                (
                    unc["lodetection"][i],
                    unc["spread1"][i],
                    unc["num_samples1"][i],
                    unc["spread2"][i],
                    unc["num_samples2"][i],
                )
            )
            cov_list.append((cov1[i], cov2[i]))

        uncertainties = np.array(
            unc_list,
            dtype=[
                ("lodetection", "f8"),
                ("spread1", "f8"),
                ("num_samples1", "i8"),
                ("spread2", "f8"),
                ("num_samples2", "i8"),
            ],
        )
        covariance = np.array(
            cov_list, dtype=[("cov1", "f8", (3, 3)), ("cov2", "f8", (3, 3))]
        )
        print(self.name + " end")
        return distances, uncertainties, covariance

    @property
    def name(self):
        """Human-readable algorithm name."""
        return "M3C2EP"
253
+
254
+
255
def updatePbar(total, queue, maxProc):
    """Consume (increment, process_delta) tuples from *queue* and mirror them
    on a tqdm progress bar.

    Runs forever; the parent terminates this process once all workers have
    joined. When tqdm is unavailable the messages are drained silently.
    """
    label = "Processing core points"
    active_procs = 0
    bar = None
    if tqdm is not None:
        bar = tqdm(
            total=total,
            ncols=100,
            desc=label + " (%02d/%02d Process(es))" % (active_procs, maxProc),
        )

    while True:
        inc, delta = queue.get()
        if bar is None:
            continue
        bar.update(inc)
        if delta != 0:
            # A worker started (+1) or finished (-1); refresh the label.
            active_procs += delta
            bar.set_description(
                label + " (%02d/%02d Process(es))" % (active_procs, maxProc)
            )
276
+
277
+
278
# Levi-Civita permutation symbol: eijk[i, j, k] is +1 for even permutations
# of (0, 1, 2), -1 for odd permutations, 0 otherwise. Contracting with it
# expresses cross products as an einsum.
eijk = np.zeros((3, 3, 3))
eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1

# Kronecker delta (3x3 identity), used as the leading factor of the
# contraction below.
dij = np.zeros((3, 3))
dij[0, 0] = dij[1, 1] = dij[2, 2] = 1

# Pre-compute an optimal contraction path for the cross-product einsum used
# in getAlongAcrossSqBatch, with dummy operands of representative shapes
# (the path depends only on shapes, not values).
n = np.zeros((3,))
poa_pts = np.zeros((3, 100))
path_opt = np.einsum_path(
    "mi, ijk, j, kn -> mn", dij, eijk, n, poa_pts, optimize="optimal"
)
290
+
291
+
292
def getAlongAcrossSqBatch(pts, poa, n):
    """Decompose a batch of points relative to an axis.

    ``pts`` is a (3, N) batch, ``poa`` the (3,) point on the axis and ``n``
    the (3,) axis direction. Returns ``(along, across_sq)``: the signed
    projection of each point onto the axis and the squared perpendicular
    distance to it.
    """
    offsets = pts - poa[:, np.newaxis]
    # Signed distance along the axis direction.
    along = n.dot(offsets)
    # Cross product n x (poa - pts) via the precomputed einsum contraction.
    reversed_offsets = poa[:, np.newaxis] - pts
    cross_vecs = np.einsum(
        "mi, ijk, j, kn -> mn", dij, eijk, n, reversed_offsets, optimize=path_opt[0]
    )
    # Squared norm of each cross vector = squared across-axis distance.
    across_sq = np.einsum("ij, ij -> j", cross_vecs, cross_vecs)
    return (along, across_sq)
301
+
302
+
303
def get_local_mean_and_Cxx_nocorr(
    Cxx, tfM, origins, redPoint, sigmas, curr_pts, curr_pos, epoch, tf=True
):
    """Compute the covariance-weighted mean of a neighbourhood and its
    averaged 3x3 covariance, assuming no correlation between points.

    Per point the measurement covariance is propagated from range/yaw/scan
    sigmas of its scan position; if ``tf`` is True, the alignment covariance
    ``Cxx`` (12x12) of the affine transformation ``tfM`` is propagated on top.

    :param origins: per-scan-position sensor origins, indexed by (id - 1)
    :param redPoint: reduction point of the alignment transformation
    :param sigmas: per-scan-position sigma table (rows indexed by id - 1)
    :param curr_pts: (nPts, 3) neighbourhood coordinates
    :param curr_pos: (nPts,) scan position id per point
    :param epoch: unused in this function body (kept for interface symmetry)
    :returns: (weighted mean point, local covariance / nPts)
    """
    nPts = curr_pts.shape[0]
    # Design matrix of nPts stacked 3x3 identities and its weighted transpose.
    A = np.tile(np.eye(3), (nPts, 1))
    ATP = np.zeros((3, 3 * nPts))
    # Without transformation, fall back to the identity affine matrix.
    tfM = tfM if tf else np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
    dx = np.zeros((nPts,), dtype=np.float64)
    dy = np.zeros((nPts,), dtype=np.float64)
    dz = np.zeros((nPts,), dtype=np.float64)
    rrange = np.zeros((nPts,), dtype=np.float64)
    sinscan = np.zeros((nPts,), dtype=np.float64)
    cosscan = np.zeros((nPts,), dtype=np.float64)
    cosyaw = np.zeros((nPts,), dtype=np.float64)
    sinyaw = np.zeros((nPts,), dtype=np.float64)
    sigmaRange = np.zeros((nPts,), dtype=np.float64)
    sigmaYaw = np.zeros((nPts,), dtype=np.float64)
    sigmaScan = np.zeros((nPts,), dtype=np.float64)

    # Per scan position: derive polar measurement geometry (range, yaw, scan
    # angle) of each point relative to the sensor origin, plus its offset from
    # the reduction point and the applicable sigmas.
    for scanPosId in np.unique(curr_pos):
        scanPos = np.array(origins[scanPosId - 1, :])
        scanPosPtsIdx = curr_pos == scanPosId

        dd = curr_pts[scanPosPtsIdx, :] - scanPos[np.newaxis, :]
        dlx, dly, dlz = dd[:, 0], dd[:, 1], dd[:, 2]
        yaw = np.arctan2(dly, dlx)
        planar_dist = np.hypot(dlx, dly)
        # Scan (polar) angle measured from the vertical axis.
        scan = np.pi / 2 - np.arctan(dlz / planar_dist)
        rrange[scanPosPtsIdx] = np.hypot(planar_dist, dlz)
        sinscan[scanPosPtsIdx] = np.sin(scan)
        cosscan[scanPosPtsIdx] = np.cos(scan)
        sinyaw[scanPosPtsIdx] = np.sin(yaw)
        cosyaw[scanPosPtsIdx] = np.cos(yaw)

        dr = curr_pts[scanPosPtsIdx, :] - redPoint
        dx[scanPosPtsIdx] = dr[:, 0]
        dy[scanPosPtsIdx] = dr[:, 1]
        dz[scanPosPtsIdx] = dr[:, 2]

        # Range noise grows with distance: sqrt(a^2 + b * 1e-6 * r^2).
        sigmaRange[scanPosPtsIdx] = np.array(
            np.sqrt(
                sigmas[scanPosId - 1][0] ** 2
                + sigmas[scanPosId - 1][1] * 1e-6 * rrange[scanPosPtsIdx] ** 2
            )
        )  # a + b*d
        # NOTE(review): the caller assembles sigma rows as
        # (range-a, range-b, scan, yaw), but columns 2/3 are consumed here as
        # yaw/scan respectively — verify the intended ordering upstream.
        sigmaYaw[scanPosPtsIdx] = np.array(sigmas[scanPosId - 1][2])
        sigmaScan[scanPosPtsIdx] = np.array(sigmas[scanPosId - 1][3])

    # Propagate the 12-parameter alignment covariance (9 rotation/scale terms
    # + 3 translations) to each point's x/y/z variance and covariances.
    if tf:
        SigmaXiXj = (
            dx**2 * Cxx[0, 0]
            + 2 * dx * dy * Cxx[0, 1]  # a11a11
            + dy**2 * Cxx[1, 1]  # a11a12
            + 2 * dy * dz * Cxx[1, 2]  # a12a12
            + dz**2 * Cxx[2, 2]  # a12a13
            + 2 * dz * dx * Cxx[0, 2]  # a13a13
            + 2  # a11a13
            * (dx * Cxx[0, 9] + dy * Cxx[1, 9] + dz * Cxx[2, 9])  # a11tx # a12tx
            + Cxx[9, 9]  # a13tx
        )  # txtx

        SigmaYiYj = (
            dx**2 * Cxx[3, 3]
            + 2 * dx * dy * Cxx[3, 4]  # a21a21
            + dy**2 * Cxx[4, 4]  # a21a22
            + 2 * dy * dz * Cxx[4, 5]  # a22a22
            + dz**2 * Cxx[5, 5]  # a22a23
            + 2 * dz * dx * Cxx[3, 5]  # a23a23
            + 2  # a21a23
            * (dx * Cxx[3, 10] + dy * Cxx[4, 10] + dz * Cxx[5, 10])  # a21ty # a22ty
            + Cxx[10, 10]  # a23ty
        )  # tyty

        SigmaZiZj = (
            dx**2 * Cxx[6, 6]
            + 2 * dx * dy * Cxx[6, 7]  # a31a31
            + dy**2 * Cxx[7, 7]  # a31a32
            + 2 * dy * dz * Cxx[7, 8]  # a32a32
            + dz**2 * Cxx[8, 8]  # a32a33
            + 2 * dz * dx * Cxx[6, 8]  # a33a33
            + 2  # a31a33
            * (dx * Cxx[6, 11] + dy * Cxx[7, 11] + dz * Cxx[8, 11])  # a31tz # a32tz
            + Cxx[11, 11]  # a33tz
        )  # tztz

        SigmaXiYj = (
            Cxx[9, 10]
            + dx * Cxx[0, 10]  # txty
            + dy * Cxx[1, 10]  # a11ty
            + dz * Cxx[2, 10]  # a12ty
            + dx  # a13ty
            * (Cxx[3, 9] + Cxx[0, 3] * dx + Cxx[1, 3] * dy + Cxx[2, 3] * dz)
            + dy * (Cxx[4, 9] + Cxx[0, 4] * dx + Cxx[1, 4] * dy + Cxx[2, 4] * dz)
            + dz * (Cxx[5, 9] + Cxx[0, 5] * dx + Cxx[1, 5] * dy + Cxx[2, 5] * dz)
        )

        SigmaXiZj = (
            Cxx[9, 11]
            + dx * Cxx[0, 11]  # txtz
            + dy * Cxx[1, 11]  # a11tz
            + dz * Cxx[2, 11]  # a12tz
            + dx  # a13tz
            * (Cxx[6, 9] + Cxx[0, 6] * dx + Cxx[1, 6] * dy + Cxx[2, 6] * dz)
            + dy * (Cxx[7, 9] + Cxx[0, 7] * dx + Cxx[1, 7] * dy + Cxx[2, 7] * dz)
            + dz * (Cxx[8, 9] + Cxx[0, 8] * dx + Cxx[1, 8] * dy + Cxx[2, 8] * dz)
        )

        SigmaYiZj = (
            Cxx[10, 11]
            + dx * Cxx[6, 10]  # tytz
            + dy * Cxx[7, 10]  # a21tx
            + dz * Cxx[8, 10]  # a22tx
            + dx  # a23tx
            * (Cxx[3, 11] + Cxx[3, 6] * dx + Cxx[3, 7] * dy + Cxx[3, 8] * dz)
            + dy * (Cxx[4, 11] + Cxx[4, 6] * dx + Cxx[4, 7] * dy + Cxx[4, 8] * dz)
            + dz * (Cxx[5, 11] + Cxx[5, 6] * dx + Cxx[5, 7] * dy + Cxx[5, 8] * dz)
        )
        C11 = np.sum(SigmaXiXj)  # sum over all j
        C12 = np.sum(SigmaXiYj)  # sum over all j
        C13 = np.sum(SigmaXiZj)  # sum over all j
        C22 = np.sum(SigmaYiYj)  # sum over all j
        C23 = np.sum(SigmaYiZj)  # sum over all j
        C33 = np.sum(SigmaZiZj)  # sum over all j
        local_Cxx = np.array([[C11, C12, C13], [C12, C22, C23], [C13, C23, C33]])
    else:
        local_Cxx = np.zeros((3, 3))

    # Measurement-noise propagation: partial derivatives of the transformed
    # cartesian coordinates w.r.t. range, yaw and scan angle, squared and
    # scaled by the corresponding sigma. C{ij}p is the per-point (co)variance
    # contribution for coordinate pair (i, j).
    C11p = (
        (
            tfM[0, 0] * cosyaw * sinscan
            + tfM[0, 1] * sinyaw * sinscan  # dX/dRange - measurements
            + tfM[0, 2] * cosscan
        )
        ** 2
        * sigmaRange**2
        + (
            -1 * tfM[0, 0] * rrange * sinyaw * sinscan
            + tfM[0, 1] * rrange * cosyaw * sinscan  # dX/dYaw
        )
        ** 2
        * sigmaYaw**2
        + (
            tfM[0, 0] * rrange * cosyaw * cosscan
            + tfM[0, 1] * rrange * sinyaw * cosscan  # dX/dScan
            + -1 * tfM[0, 2] * rrange * sinscan
        )
        ** 2
        * sigmaScan**2
    )

    C12p = (
        (
            tfM[1, 0] * cosyaw * sinscan
            + tfM[1, 1] * sinyaw * sinscan  # dY/dRange - measurements
            + tfM[1, 2] * cosscan
        )
        * (
            tfM[0, 0] * cosyaw * sinscan
            + tfM[0, 1] * sinyaw * sinscan  # dX/dRange - measurements
            + tfM[0, 2] * cosscan
        )
        * sigmaRange**2
        + (
            -1 * tfM[1, 0] * rrange * sinyaw * sinscan
            + tfM[1, 1] * rrange * cosyaw * sinscan  # dY/dYaw
        )
        * (
            -1 * tfM[0, 0] * rrange * sinyaw * sinscan
            + tfM[0, 1] * rrange * cosyaw * sinscan  # dX/dYaw
        )
        * sigmaYaw**2
        + (
            tfM[0, 0] * rrange * cosyaw * cosscan
            + tfM[0, 1] * rrange * sinyaw * cosscan  # dX/dScan
            + -1 * tfM[0, 2] * rrange * sinscan
        )
        * (
            tfM[1, 0] * rrange * cosyaw * cosscan
            + tfM[1, 1] * rrange * sinyaw * cosscan  # dY/dScan
            + -1 * tfM[1, 2] * rrange * sinscan
        )
        * sigmaScan**2
    )

    C22p = (
        (
            tfM[1, 0] * cosyaw * sinscan
            + tfM[1, 1] * sinyaw * sinscan  # dY/dRange - measurements
            + tfM[1, 2] * cosscan
        )
        ** 2
        * sigmaRange**2
        + (
            -1 * tfM[1, 0] * rrange * sinyaw * sinscan
            + tfM[1, 1] * rrange * cosyaw * sinscan  # dY/dYaw
        )
        ** 2
        * sigmaYaw**2
        + (
            tfM[1, 0] * rrange * cosyaw * cosscan
            + tfM[1, 1] * rrange * sinyaw * cosscan  # dY/dScan
            + -1 * tfM[1, 2] * rrange * sinscan
        )
        ** 2
        * sigmaScan**2
    )

    C23p = (
        (
            tfM[1, 0] * cosyaw * sinscan
            + tfM[1, 1] * sinyaw * sinscan  # dY/dRange - measurements
            + tfM[1, 2] * cosscan
        )
        * (
            tfM[2, 0] * cosyaw * sinscan
            + tfM[2, 1] * sinyaw * sinscan  # dZ/dRange - measurements
            + tfM[2, 2] * cosscan
        )
        * sigmaRange**2
        + (
            -1 * tfM[1, 0] * rrange * sinyaw * sinscan
            + tfM[1, 1] * rrange * cosyaw * sinscan  # dY/dYaw
        )
        * (
            -1 * tfM[2, 0] * rrange * sinyaw * sinscan
            + tfM[2, 1] * rrange * cosyaw * sinscan  # dZ/dYaw
        )
        * sigmaYaw**2
        + (
            tfM[2, 0] * rrange * cosyaw * cosscan
            + tfM[2, 1] * rrange * sinyaw * cosscan  # dZ/dScan
            + -1 * tfM[2, 2] * rrange * sinscan
        )
        * (
            tfM[1, 0] * rrange * cosyaw * cosscan
            + tfM[1, 1] * rrange * sinyaw * cosscan  # dY/dScan
            + -1 * tfM[1, 2] * rrange * sinscan
        )
        * sigmaScan**2
    )

    C33p = (
        (
            tfM[2, 0] * cosyaw * sinscan
            + tfM[2, 1] * sinyaw * sinscan  # dZ/dRange - measurements
            + tfM[2, 2] * cosscan
        )
        ** 2
        * sigmaRange**2
        + (
            -1 * tfM[2, 0] * rrange * sinyaw * sinscan
            + tfM[2, 1] * rrange * cosyaw * sinscan  # dZ/dYaw
        )
        ** 2
        * sigmaYaw**2
        + (
            tfM[2, 0] * rrange * cosyaw * cosscan
            + tfM[2, 1] * rrange * sinyaw * cosscan  # dZ/dScan
            + -1 * tfM[2, 2] * rrange * sinscan
        )
        ** 2
        * sigmaScan**2
    )

    C13p = (
        (
            tfM[2, 0] * cosyaw * sinscan
            + tfM[2, 1] * sinyaw * sinscan  # dZ/dRange - measurements
            + tfM[2, 2] * cosscan
        )
        * (
            tfM[0, 0] * cosyaw * sinscan
            + tfM[0, 1] * sinyaw * sinscan  # dX/dRange - measurements
            + tfM[0, 2] * cosscan
        )
        * sigmaRange**2
        + (
            -1 * tfM[2, 0] * rrange * sinyaw * sinscan
            + tfM[2, 1] * rrange * cosyaw * sinscan  # dZ/dYaw
        )
        * (
            -1 * tfM[0, 0] * rrange * sinyaw * sinscan
            + tfM[0, 1] * rrange * cosyaw * sinscan  # dX/dYaw
        )
        * sigmaYaw**2
        + (
            tfM[2, 0] * rrange * cosyaw * cosscan
            + tfM[2, 1] * rrange * sinyaw * cosscan  # dZ/dScan
            + -1 * tfM[2, 2] * rrange * sinscan
        )
        * (
            tfM[0, 0] * rrange * cosyaw * cosscan
            + tfM[0, 1] * rrange * sinyaw * cosscan  # dX/dScan
            + -1 * tfM[0, 2] * rrange * sinscan
        )
        * sigmaScan**2
    )
    # Accumulate the symmetric measurement-noise sums into the local matrix.
    local_Cxx[0, 0] += np.sum(C11p)
    local_Cxx[0, 1] += np.sum(C12p)
    local_Cxx[0, 2] += np.sum(C13p)
    local_Cxx[1, 0] += np.sum(C12p)
    local_Cxx[1, 1] += np.sum(C22p)
    local_Cxx[1, 2] += np.sum(C23p)
    local_Cxx[2, 1] += np.sum(C23p)
    local_Cxx[2, 0] += np.sum(C13p)
    local_Cxx[2, 2] += np.sum(C33p)

    # Get mean without correlation (averages out anyway, or something...)
    # Weighted least-squares mean: each point is weighted by the inverse of
    # its own 3x3 covariance (singular covariances fall back to identity).
    for pii in range(nPts):
        # NOTE: rebinds the parameter name Cxx to the per-point covariance.
        Cxx = np.array(
            [
                [C11p[pii], C12p[pii], C13p[pii]],
                [C12p[pii], C22p[pii], C23p[pii]],
                [C13p[pii], C23p[pii], C33p[pii]],
            ]
        )
        if np.linalg.det(Cxx) == 0:
            Cxx = np.eye(3)
        Cix = np.linalg.inv(Cxx)
        ATP[:, pii * 3 : (pii + 1) * 3] = Cix
    N = np.dot(ATP, A)
    Qxx = np.linalg.inv(N)  # can only have > 0 in main diagonal!
    # Solve around the centroid for numerical stability.
    pts_m = curr_pts.mean(axis=0)
    l = (curr_pts - pts_m).flatten()
    mean = np.dot(Qxx, np.dot(ATP, l)) + pts_m

    return mean, local_Cxx / nPts
630
+
631
+
632
def process_corepoint_list(
    corepoints,
    corepoint_normals,
    p1_idx,
    p1_shm_name,
    p1_size,
    p1_positions,
    p2_idx,
    p2_shm_name,
    p2_size,
    p2_positions,
    M3C2Meta,
    idx,
    return_dict,
    pbarQueue,
):
    """Worker process: compute M3C2-EP distance, LoD and covariances for one
    block of core points.

    Reads both epochs' coordinates from named shared memory, filters each
    core point's pre-queried neighbours (``p1_idx`` / ``p2_idx``) to the
    search cylinder, propagates errors via get_local_mean_and_Cxx_nocorr and
    stores the per-block result arrays under ``return_dict[idx]``. Progress
    is reported through ``pbarQueue`` as (increment, process_delta) tuples.
    """
    pbarQueue.put((0, 1))  # announce worker start
    p1_shm = mp.shared_memory.SharedMemory(name=p1_shm_name)
    p2_shm = mp.shared_memory.SharedMemory(name=p2_shm_name)
    p1_coords = np.ndarray(p1_size, dtype=np.float64, buffer=p1_shm.buf)
    p2_coords = np.ndarray(p2_size, dtype=np.float64, buffer=p2_shm.buf)

    max_dist = M3C2Meta["maxdist"]
    search_radius = M3C2Meta["searchrad"]

    # Outputs default to NaN (note: NaN cast into the int32 arrays yields an
    # implementation-defined sentinel, not NaN).
    M3C2_vals = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
    M3C2_LoD = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
    M3C2_N1 = np.full((corepoints.shape[0]), np.nan, dtype=np.int32)
    M3C2_N2 = np.full((corepoints.shape[0]), np.nan, dtype=np.int32)

    M3C2_spread1 = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
    M3C2_spread2 = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
    M3C2_cov1 = np.full((corepoints.shape[0], 3, 3), np.nan, dtype=np.float64)
    M3C2_cov2 = np.full((corepoints.shape[0], 3, 3), np.nan, dtype=np.float64)

    for cp_idx, p1_neighbours in enumerate(p1_idx):
        n = corepoint_normals[cp_idx]
        # Keep only epoch-1 neighbours inside the cylinder (|along| <= maxdist,
        # across^2 <= searchrad^2) around the normal through the core point.
        p1_curr_pts = p1_coords[p1_neighbours, :]
        along1, acrossSq1 = getAlongAcrossSqBatch(p1_curr_pts.T, corepoints[cp_idx], n)
        p1_curr_pts = p1_curr_pts[
            np.logical_and(np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2),
            :,
        ]
        p1_scanPos = p1_positions[p1_neighbours]
        p1_scanPos = p1_scanPos[
            np.logical_and(np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2)
        ]
        if p1_curr_pts.shape[0] < M3C2Meta["minneigh"]:
            pbarQueue.put((1, 0))  # point processed
            M3C2_N1[cp_idx] = p1_curr_pts.shape[0]
            continue
        elif p1_curr_pts.shape[0] > M3C2Meta["maxneigh"]:
            # NOTE(review): argsort over a truncated *pre-filter* acrossSq
            # slice looks suspect — it permutes rather than selects the
            # closest maxneigh points, and the index length may not match the
            # filtered arrays. Verify against the reference implementation.
            p1_curr_pts = p1_curr_pts[np.argsort(acrossSq1[: M3C2Meta["maxneigh"]])]
            p1_scanPos = p1_scanPos[np.argsort(acrossSq1[: M3C2Meta["maxneigh"]])]

        # NOTE(review): Cxx/tfM/origins/redPoint/sigmas are loop-invariant and
        # could be hoisted out of this loop.
        Cxx = M3C2Meta["Cxx"]
        tfM = M3C2Meta["tfM"]
        origins = np.array([SP["origin"] for SP in M3C2Meta["spInfos"][0]])
        redPoint = M3C2Meta["redPoint"]
        sigmas = np.array(
            [
                [
                    SP["sigma_range"],
                    SP["sigma_range"],
                    SP["sigma_scan"],
                    SP["sigma_yaw"],
                ]
                for SP in M3C2Meta["spInfos"][0]
            ]
        )

        p1_weighted_CoG, p1_local_Cxx = get_local_mean_and_Cxx_nocorr(
            Cxx,
            tfM,
            origins,
            redPoint,
            sigmas,
            p1_curr_pts,
            p1_scanPos,
            epoch=0,
            tf=False,
        )  # only one dataset has been transformed
        # NOTE(review): along1_var is computed but never used below.
        along1_var = np.var(
            along1[
                np.logical_and(
                    np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2
                )
            ]
        )

        # Same neighbourhood filtering for epoch 2.
        p2_neighbours = p2_idx[cp_idx]
        p2_curr_pts = p2_coords[p2_neighbours, :]
        along2, acrossSq2 = getAlongAcrossSqBatch(p2_curr_pts.T, corepoints[cp_idx], n)
        p2_curr_pts = p2_curr_pts[
            np.logical_and(np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2),
            :,
        ]
        p2_scanPos = p2_positions[p2_neighbours]
        p2_scanPos = p2_scanPos[
            np.logical_and(np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2)
        ]
        if p2_curr_pts.shape[0] < M3C2Meta["minneigh"]:
            pbarQueue.put((1, 0))  # point processed
            M3C2_N2[cp_idx] = p2_curr_pts.shape[0]
            continue
        elif p2_curr_pts.shape[0] > M3C2Meta["maxneigh"]:
            # NOTE(review): same argsort-truncation concern as for epoch 1.
            p2_curr_pts = p2_curr_pts[np.argsort(acrossSq2[: M3C2Meta["maxneigh"]])]
            p2_scanPos = p2_scanPos[np.argsort(acrossSq2[: M3C2Meta["maxneigh"]])]

        origins = np.array([SP["origin"] for SP in M3C2Meta["spInfos"][1]])
        sigmas = np.array(
            [
                [
                    SP["sigma_range"],
                    SP["sigma_range"],
                    SP["sigma_scan"],
                    SP["sigma_yaw"],
                ]
                for SP in M3C2Meta["spInfos"][1]
            ]
        )
        p2_weighted_CoG, p2_local_Cxx = get_local_mean_and_Cxx_nocorr(
            Cxx,
            tfM,
            origins,
            redPoint,
            sigmas,
            p2_curr_pts,
            p2_scanPos,
            epoch=1,
            tf=True,
        )
        # NOTE(review): along2_var is computed but never used below.
        along2_var = np.var(
            along2[
                np.logical_and(
                    np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2
                )
            ]
        )

        p1_CoG = p1_weighted_CoG
        p2_CoG = p2_weighted_CoG

        p1_CoG_Cxx = p1_local_Cxx
        p2_CoG_Cxx = p2_local_Cxx

        # NOTE(review): p1_p2_CoG_Cxx and F are assembled but not consumed
        # afterwards (the LoD below uses sigmaD directly).
        p1_p2_CoG_Cxx = np.zeros((6, 6))
        p1_p2_CoG_Cxx[0:3, 0:3] = p1_CoG_Cxx
        p1_p2_CoG_Cxx[3:6, 3:6] = p2_CoG_Cxx

        # Signed distance between the weighted centroids along the normal.
        M3C2_dist = n.dot(p1_CoG - p2_CoG)
        F = np.hstack([-n, n])

        M3C2_vals[cp_idx] = M3C2_dist

        N1 = p1_curr_pts.shape[0]
        N2 = p2_curr_pts.shape[0]

        # Combined covariance of the centroid difference.
        sigmaD = p1_CoG_Cxx + p2_CoG_Cxx

        p = 3  # three dimensional
        Tsqalt = n.T.dot(np.linalg.inv(sigmaD)).dot(n)

        # Level of detection at 95% confidence (chi-squared, 3 dof).
        M3C2_LoD[cp_idx] = np.sqrt(sstats.chi2.ppf(0.95, p) / Tsqalt)
        M3C2_N1[cp_idx] = N1
        M3C2_N2[cp_idx] = N2

        # add M3C2 spreads
        normal = n[np.newaxis, :]
        M3C2_spread1[cp_idx] = np.sqrt(
            np.matmul(np.matmul(normal, p1_CoG_Cxx), normal.T)
        )
        M3C2_spread2[cp_idx] = np.sqrt(
            np.matmul(np.matmul(normal, p2_CoG_Cxx), normal.T)
        )
        M3C2_cov1[cp_idx] = p1_CoG_Cxx
        M3C2_cov2[cp_idx] = p2_CoG_Cxx

        pbarQueue.put((1, 0))  # point processed

    # Publish the whole block's results at once under this block's index.
    return_dict[idx] = {
        "lod_new": M3C2_LoD,
        "val": M3C2_vals,
        "m3c2_n1": M3C2_N1,
        "m3c2_n2": M3C2_N2,
        "m3c2_spread1": M3C2_spread1,
        "m3c2_spread2": M3C2_spread2,
        "m3c2_cov1": M3C2_cov1,
        "m3c2_cov2": M3C2_cov2,
    }
    pbarQueue.put((0, -1))  # announce worker end
    p1_shm.close()
    p2_shm.close()
825
+
826
+
827
def radius_search(epoch: Epoch, query: np.ndarray, radius: float):
    """Query the epoch's KD-tree for neighbors within a radius r

    :param query:
        An array of points to query.
        Array-like of shape (n_samples, 3) or query 1 sample point of shape (3,)
    :type query: array
    :param radius:
        The search radius around each query point.
    :type radius: float
    :return:
        A list with one neighbor-index result per query point.
    :raises Py4DGeoError: if the query array has an unsupported shape.
    """
    # Single point of shape (3,): wrap the one result in a list.
    if len(query.shape) == 1 and query.shape[0] == 3:
        return [epoch.kdtree.radius_search(query, radius)]

    # Batch of shape (n, 3): one KD-tree query per row.
    if len(query.shape) == 2 and query.shape[1] == 3:
        return [epoch.kdtree.radius_search(q, radius) for q in query]

    # Neither shape matched: reject the input. (The original also had an
    # unreachable `return None` after this raise; it has been removed.)
    raise Py4DGeoError(
        "Please ensure queries are array-like of shape (n_samples, 3)"
        " or of shape (3,) to query 1 sample point!"
    )