py4dgeo 0.7.0__cp313-cp313-macosx_14_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py4dgeo/m3c2ep.py ADDED
@@ -0,0 +1,855 @@
1
+ import numpy as np
2
+ import py4dgeo
3
+ import math
4
+ import time
5
+ import scipy.stats as sstats
6
+ import multiprocessing as mp
7
+ import laspy
8
+
9
+ from py4dgeo.epoch import Epoch
10
+ from py4dgeo.util import (
11
+ as_double_precision,
12
+ Py4DGeoError,
13
+ )
14
+
15
+ from py4dgeo import M3C2
16
+
17
+ import warnings
18
+
19
+ warnings.filterwarnings("ignore")
20
+
21
+ try:
22
+ from tqdm import tqdm
23
+ except ImportError:
24
+ tqdm = None
25
+
26
+ default_tfM = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
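+ # identity transformation: tfM = [R | t] with R = I (3 x 3) and t = 0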
27
+
28
+
29
+ class M3C2EP(M3C2):
30
+ def __init__(
31
+ self,
32
+ tfM: np.ndarray = default_tfM,
33
+ Cxx: np.ndarray = np.zeros((12, 12)),
34
+ refPointMov: np.ndarray = np.array([0, 0, 0]),
35
+ perform_trans: bool = True,
36
+ **kwargs,
37
+ ):
38
+ """An M3C2-EP implementation
39
+ that push the limits of 3D topographic point cloud change detection by error propagation.
40
+ The algorithm needs an alignment covariance matrix of shape 12 x 12, an affine transformation matrix
41
+ of shape 3 x 4 and a reduction point (x0,y0,z0) (rotation origin, 3 parameters) obtained from
42
+ aligning the two point clouds. The formula of the transformation see in user docs.
43
+ The transformation can be set by a boolean flag 'perform_trans' and is performed by default.
44
+ """
45
+ assert tfM.shape == (3, 4)
46
+ assert refPointMov.shape == (3,)
47
+ assert Cxx.shape == (12, 12)
48
+
49
+ self.tfM = tfM
50
+ self.Cxx = Cxx
51
+ self.refPointMov = refPointMov
52
+ self.perform_trans = perform_trans
53
+ super().__init__(**kwargs)
54
+
55
+ def calculate_distances(self, epoch1, epoch2):
56
+ """Calculate the distances between two epochs"""
57
+ print(self.name + " running")
58
+
59
+ if not isinstance(self.cyl_radius, float):
60
+ raise Py4DGeoError(
61
+ f"{self.name} requires exactly one cylinder radius to be given"
62
+ )
63
+
64
+ epoch1.build_kdtree()
65
+ epoch2.build_kdtree()
66
+
67
+ p1_coords = epoch1.cloud
68
+ p1_positions = epoch1.scanpos_id
69
+ p2_coords = epoch2.cloud
70
+ p2_positions = epoch2.scanpos_id
71
+
72
+ # set default M3C2Meta
73
+ M3C2Meta = {"searchrad": 0.5, "maxdist": 3, "minneigh": 5, "maxneigh": 100000}
74
+ M3C2Meta["searchrad"] = self.cyl_radius
75
+ M3C2Meta["maxdist"] = self.max_distance
76
+
77
+ M3C2Meta["spInfos"] = [epoch1.scanpos_info, epoch2.scanpos_info]
78
+ M3C2Meta["tfM"] = self.tfM
79
+ M3C2Meta["Cxx"] = self.Cxx
80
+ M3C2Meta["redPoint"] = self.refPointMov
81
+
82
+ refPointMov = self.refPointMov
83
+ tfM = self.tfM
84
+
85
+ # transform p2
86
+ if self.perform_trans:
87
+ p2_coords = p2_coords - refPointMov
88
+ p2_coords = np.dot(tfM[:3, :3], p2_coords.T).T + tfM[:, 3] + refPointMov
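+ # i.e. p2' = R @ (p2 - x0) + t + x0 with R = tfM[:3, :3], t = tfM[:, 3], x0 = refPointMov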
89
+
90
+ # load query points
91
+ query_coords = self.corepoints
92
+ query_norms = self.directions()
93
+
94
+ if query_norms is None:
95
+ raise Py4DGeoError("Core point cloud needs normals set.")
96
+
97
+ # Repeat normals to shape of corepoints when the user explicitly provided one direction
98
+ if query_norms.shape[0] == 1:
99
+ query_norms = query_norms.repeat(self.corepoints.shape[0], axis=0)
100
+ exit(-1)
101
+ subsample = False
102
+ if subsample:
103
+ sub_idx = np.random.choice(np.arange(0, query_coords.shape[0]), 2000)
104
+ query_coords = query_coords[sub_idx]
105
+ query_norms = query_norms[sub_idx]
106
+
107
+ NUM_THREADS = 4
108
+ NUM_BLOCKS = 16
109
+
110
+ query_coords_subs = np.array_split(query_coords, NUM_BLOCKS)
111
+ query_norms_subs = np.array_split(query_norms, NUM_BLOCKS)
112
+
113
+ # start mp
114
+ manager = mp.Manager()
115
+ return_dict = manager.dict()
116
+
117
+ # prepare shared memory
118
+ p1_coords_shm = mp.shared_memory.SharedMemory(
119
+ create=True, size=p1_coords.nbytes
120
+ )
121
+ p1_coords_sha = np.ndarray(
122
+ p1_coords.shape, dtype=p1_coords.dtype, buffer=p1_coords_shm.buf
123
+ )
124
+ p1_coords_sha[:] = p1_coords[:]
125
+ p2_coords_shm = mp.shared_memory.SharedMemory(
126
+ create=True, size=p2_coords.nbytes
127
+ )
128
+ p2_coords_sha = np.ndarray(
129
+ p2_coords.shape, dtype=p2_coords.dtype, buffer=p2_coords_shm.buf
130
+ )
131
+ p2_coords_sha[:] = p2_coords[:]
132
+
133
+ max_dist = M3C2Meta["maxdist"]
134
+ search_radius = M3C2Meta["searchrad"]
135
+ effective_search_radius = math.hypot(max_dist, search_radius)
136
+
137
+ # Querying neighbours
138
+ pbarQueue = mp.Queue()
139
+ pbarProc = mp.Process(
140
+ target=updatePbar, args=(query_coords.shape[0], pbarQueue, NUM_THREADS)
141
+ )
142
+ pbarProc.start()
143
+ procs = []
144
+
145
+ last_started_idx = -1
146
+ running_ps = []
147
+ while True:
148
+ if len(running_ps) < NUM_THREADS:
149
+ last_started_idx += 1
150
+ if last_started_idx < len(query_coords_subs):
151
+ curr_subs = query_coords_subs[last_started_idx]
152
+ p1_idx = radius_search(epoch1, curr_subs, effective_search_radius)
153
+ p2_idx = radius_search(epoch2, curr_subs, effective_search_radius)
154
+
155
+ p = mp.Process(
156
+ target=process_corepoint_list,
157
+ args=(
158
+ curr_subs,
159
+ query_norms_subs[last_started_idx],
160
+ p1_idx,
161
+ p1_coords_shm.name,
162
+ p1_coords.shape,
163
+ p1_positions,
164
+ p2_idx,
165
+ p2_coords_shm.name,
166
+ p2_coords.shape,
167
+ p2_positions,
168
+ M3C2Meta,
169
+ last_started_idx,
170
+ return_dict,
171
+ pbarQueue,
172
+ ),
173
+ )
174
+ procs.append(p)
175
+
176
+ procs[last_started_idx].start()
177
+ running_ps.append(last_started_idx)
178
+ else:
179
+ break
180
+ # Drop finished workers; rebuild the list rather than removing items while iterating over it
181
+ running_ps = [
182
+ running_p for running_p in running_ps if procs[running_p].is_alive()
+ ]
183
+ time.sleep(1)
184
+
185
+ for p in procs:
186
+ p.join()
187
+
188
+ pbarQueue.put((0, 0))
189
+ pbarProc.terminate()
190
+
191
+ p1_coords_shm.close()
192
+ p1_coords_shm.unlink()
193
+ p2_coords_shm.close()
194
+ p2_coords_shm.unlink()
195
+
196
+ out_attrs = {
197
+ key: (
198
+ np.empty((query_coords.shape[0], 3, 3), dtype=val.dtype)
199
+ if key == "m3c2_cov1" or key == "m3c2_cov2"
200
+ else np.empty(query_coords.shape[0], dtype=val.dtype)
201
+ )
202
+ for key, val in return_dict[0].items()
203
+ }
204
+ for key in out_attrs:
205
+ curr_start = 0
206
+ for i in range(NUM_BLOCKS):
207
+ curr_len = return_dict[i][key].shape[0]
208
+ out_attrs[key][curr_start : curr_start + curr_len] = return_dict[i][key]
209
+ curr_start += curr_len
210
+
211
+ distances = out_attrs["val"]
212
+ cov1 = out_attrs["m3c2_cov1"]
213
+ cov2 = out_attrs["m3c2_cov2"]
214
+ unc = {
215
+ "lodetection": out_attrs["lod_new"],
216
+ "spread1": out_attrs["m3c2_spread1"],
217
+ "num_samples1": out_attrs["m3c2_n1"],
218
+ "spread2": out_attrs["m3c2_spread2"],
219
+ "num_samples2": out_attrs["m3c2_n2"],
220
+ }
221
+
222
+ unc_list = []
223
+ cov_list = []
224
+ for i in range(unc["lodetection"].shape[0]):
225
+ unc_item = (
226
+ unc["lodetection"][i],
227
+ unc["spread1"][i],
228
+ unc["num_samples1"][i],
229
+ unc["spread2"][i],
230
+ unc["num_samples2"][i],
231
+ )
232
+ unc_list.append(unc_item)
233
+ cov_item = (cov1[i], cov2[i])
234
+ cov_list.append(cov_item)
235
+
236
+ uncertainties = np.array(
237
+ unc_list,
238
+ dtype=[
239
+ ("lodetection", "f8"),
240
+ ("spread1", "f8"),
241
+ ("num_samples1", "i8"),
242
+ ("spread2", "f8"),
243
+ ("num_samples2", "i8"),
244
+ ],
245
+ )
246
+ covariance = np.array(
247
+ cov_list, dtype=[("cov1", "f8", (3, 3)), ("cov2", "f8", (3, 3))]
248
+ )
249
+ print(self.name + " end")
250
+ return distances, uncertainties, covariance
251
+
252
+ @property
253
+ def name(self):
254
+ return "M3C2EP"
255
+
256
+
257
+ def updatePbar(total, queue, maxProc):
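+ """Drive an optional tqdm progress bar from a queue of (increment, process-count-delta) tuples."""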
258
+ desc = "Processing core points"
259
+ pCount = 0
260
+ if tqdm is None:
261
+ pbar = None
262
+ else:
263
+ pbar = tqdm(
264
+ total=total,
265
+ ncols=100,
266
+ desc=desc + " (%02d/%02d Process(es))" % (pCount, maxProc),
267
+ )
268
+
269
+ while True:
270
+ inc, process = queue.get()
271
+ if pbar is not None:
272
+ pbar.update(inc)
273
+ if process != 0:
274
+ pCount += process
275
+ pbar.set_description(
276
+ desc + " (%02d/%02d Process(es))" % (pCount, maxProc)
277
+ )
278
+
279
+
280
+ eijk = np.zeros((3, 3, 3))
281
+ eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
282
+ eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1
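+ # eijk: Levi-Civita (permutation) tensor, used to express cross products via einsum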
283
+
284
+ dij = np.zeros((3, 3))
285
+ dij[0, 0] = dij[1, 1] = dij[2, 2] = 1
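+ # dij: 3 x 3 identity matrix (Kronecker delta)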
286
+
287
+ n = np.zeros((3,))
288
+ poa_pts = np.zeros((3, 100))
289
+ path_opt = np.einsum_path(
290
+ "mi, ijk, j, kn -> mn", dij, eijk, n, poa_pts, optimize="optimal"
291
+ )
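+ # precompute an optimal einsum contraction path on dummy-shaped arrays;
+ # path_opt[0] is reused for every call of getAlongAcrossSqBatch below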
292
+
293
+
294
+ def getAlongAcrossSqBatch(pts, poa, n):
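+ """Return, for each point in pts (3 x N), the signed distance along the normal n
+ measured from poa, and the squared perpendicular distance from the cylinder axis
+ through poa in direction n (assuming n is a unit vector)."""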
295
+ pts_poa = pts - poa[:, np.newaxis]
296
+ alongs = n.dot(pts_poa)
297
+ poa_pts = poa[:, np.newaxis] - pts
298
+ crosses = np.einsum(
299
+ "mi, ijk, j, kn -> mn", dij, eijk, n, poa_pts, optimize=path_opt[0]
300
+ )
301
+ across2 = np.einsum("ij, ij -> j", crosses, crosses)
302
+ return (alongs, across2)
303
+
304
+
305
+ def get_local_mean_and_Cxx_nocorr(
306
+ Cxx, tfM, origins, redPoint, sigmas, curr_pts, curr_pos, epoch, tf=True
307
+ ):
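+ """Return the inverse-covariance-weighted local mean of curr_pts and the 3 x 3
+ covariance obtained by averaging the per-point covariances (range/scan/yaw measurement
+ noise and, if tf is True, the alignment covariance Cxx propagated through the transformation)."""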
308
+ nPts = curr_pts.shape[0]
309
+ A = np.tile(np.eye(3), (nPts, 1))
310
+ ATP = np.zeros((3, 3 * nPts))
311
+ tfM = tfM if tf else np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
312
+ dx = np.zeros((nPts,), dtype=np.float64)
313
+ dy = np.zeros((nPts,), dtype=np.float64)
314
+ dz = np.zeros((nPts,), dtype=np.float64)
315
+ rrange = np.zeros((nPts,), dtype=np.float64)
316
+ sinscan = np.zeros((nPts,), dtype=np.float64)
317
+ cosscan = np.zeros((nPts,), dtype=np.float64)
318
+ cosyaw = np.zeros((nPts,), dtype=np.float64)
319
+ sinyaw = np.zeros((nPts,), dtype=np.float64)
320
+ sigmaRange = np.zeros((nPts,), dtype=np.float64)
321
+ sigmaYaw = np.zeros((nPts,), dtype=np.float64)
322
+ sigmaScan = np.zeros((nPts,), dtype=np.float64)
323
+
324
+ for scanPosId in np.unique(curr_pos):
325
+ scanPos = np.array(origins[scanPosId - 1, :])
326
+ scanPosPtsIdx = curr_pos == scanPosId
327
+
328
+ dd = curr_pts[scanPosPtsIdx, :] - scanPos[np.newaxis, :]
329
+ dlx, dly, dlz = dd[:, 0], dd[:, 1], dd[:, 2]
330
+ yaw = np.arctan2(dly, dlx)
331
+ planar_dist = np.hypot(dlx, dly)
332
+ scan = np.pi / 2 - np.arctan(dlz / planar_dist)
333
+ rrange[scanPosPtsIdx] = np.hypot(planar_dist, dlz)
334
+ sinscan[scanPosPtsIdx] = np.sin(scan)
335
+ cosscan[scanPosPtsIdx] = np.cos(scan)
336
+ sinyaw[scanPosPtsIdx] = np.sin(yaw)
337
+ cosyaw[scanPosPtsIdx] = np.cos(yaw)
338
+
339
+ dr = curr_pts[scanPosPtsIdx, :] - redPoint
340
+ dx[scanPosPtsIdx] = dr[:, 0]
341
+ dy[scanPosPtsIdx] = dr[:, 1]
342
+ dz[scanPosPtsIdx] = dr[:, 2]
343
+
344
+ sigmaRange[scanPosPtsIdx] = np.array(
345
+ np.sqrt(
346
+ sigmas[scanPosId - 1][0] ** 2
347
+ + sigmas[scanPosId - 1][1] * 1e-6 * rrange[scanPosPtsIdx] ** 2
348
+ )
349
+ ) # range noise model: a**2 + b * 1e-6 * d**2
350
+ sigmaYaw[scanPosPtsIdx] = np.array(sigmas[scanPosId - 1][2])
351
+ sigmaScan[scanPosPtsIdx] = np.array(sigmas[scanPosId - 1][3])
352
+
353
+ if tf:
354
+ SigmaXiXj = (
355
+ dx**2 * Cxx[0, 0]
356
+ + 2 * dx * dy * Cxx[0, 1] # a11a11
357
+ + dy**2 * Cxx[1, 1] # a11a12
358
+ + 2 * dy * dz * Cxx[1, 2] # a12a12
359
+ + dz**2 * Cxx[2, 2] # a12a13
360
+ + 2 * dz * dx * Cxx[0, 2] # a13a13
361
+ + 2 # a11a13
362
+ * (dx * Cxx[0, 9] + dy * Cxx[1, 9] + dz * Cxx[2, 9]) # a11tx # a12tx
363
+ + Cxx[9, 9] # a13tx
364
+ ) # txtx
365
+
366
+ SigmaYiYj = (
367
+ dx**2 * Cxx[3, 3]
368
+ + 2 * dx * dy * Cxx[3, 4] # a21a21
369
+ + dy**2 * Cxx[4, 4] # a21a22
370
+ + 2 * dy * dz * Cxx[4, 5] # a22a22
371
+ + dz**2 * Cxx[5, 5] # a22a23
372
+ + 2 * dz * dx * Cxx[3, 5] # a23a23
373
+ + 2 # a21a23
374
+ * (dx * Cxx[3, 10] + dy * Cxx[4, 10] + dz * Cxx[5, 10]) # a21ty # a22ty
375
+ + Cxx[10, 10] # a23ty
376
+ ) # tyty
377
+
378
+ SigmaZiZj = (
379
+ dx**2 * Cxx[6, 6]
380
+ + 2 * dx * dy * Cxx[6, 7] # a31a31
381
+ + dy**2 * Cxx[7, 7] # a31a32
382
+ + 2 * dy * dz * Cxx[7, 8] # a32a32
383
+ + dz**2 * Cxx[8, 8] # a32a33
384
+ + 2 * dz * dx * Cxx[6, 8] # a33a33
385
+ + 2 # a31a33
386
+ * (dx * Cxx[6, 11] + dy * Cxx[7, 11] + dz * Cxx[8, 11]) # a31tz # a32tz
387
+ + Cxx[11, 11] # a33tz
388
+ ) # tztz
389
+
390
+ SigmaXiYj = (
391
+ Cxx[9, 10]
392
+ + dx * Cxx[0, 10] # txty
393
+ + dy * Cxx[1, 10] # a11ty
394
+ + dz * Cxx[2, 10] # a12ty
395
+ + dx # a13ty
396
+ * (Cxx[3, 9] + Cxx[0, 3] * dx + Cxx[1, 3] * dy + Cxx[2, 3] * dz)
397
+ + dy * (Cxx[4, 9] + Cxx[0, 4] * dx + Cxx[1, 4] * dy + Cxx[2, 4] * dz)
398
+ + dz * (Cxx[5, 9] + Cxx[0, 5] * dx + Cxx[1, 5] * dy + Cxx[2, 5] * dz)
399
+ )
400
+
401
+ SigmaXiZj = (
402
+ Cxx[9, 11]
403
+ + dx * Cxx[0, 11] # txtz
404
+ + dy * Cxx[1, 11] # a11tz
405
+ + dz * Cxx[2, 11] # a12tz
406
+ + dx # a13tz
407
+ * (Cxx[6, 9] + Cxx[0, 6] * dx + Cxx[1, 6] * dy + Cxx[2, 6] * dz)
408
+ + dy * (Cxx[7, 9] + Cxx[0, 7] * dx + Cxx[1, 7] * dy + Cxx[2, 7] * dz)
409
+ + dz * (Cxx[8, 9] + Cxx[0, 8] * dx + Cxx[1, 8] * dy + Cxx[2, 8] * dz)
410
+ )
411
+
412
+ SigmaYiZj = (
413
+ Cxx[10, 11]
414
+ + dx * Cxx[6, 10] # tytz
415
+ + dy * Cxx[7, 10] # a21tx
416
+ + dz * Cxx[8, 10] # a22tx
417
+ + dx # a23tx
418
+ * (Cxx[3, 11] + Cxx[3, 6] * dx + Cxx[3, 7] * dy + Cxx[3, 8] * dz)
419
+ + dy * (Cxx[4, 11] + Cxx[4, 6] * dx + Cxx[4, 7] * dy + Cxx[4, 8] * dz)
420
+ + dz * (Cxx[5, 11] + Cxx[5, 6] * dx + Cxx[5, 7] * dy + Cxx[5, 8] * dz)
421
+ )
422
+ C11 = np.sum(SigmaXiXj) # sum over all j
423
+ C12 = np.sum(SigmaXiYj) # sum over all j
424
+ C13 = np.sum(SigmaXiZj) # sum over all j
425
+ C22 = np.sum(SigmaYiYj) # sum over all j
426
+ C23 = np.sum(SigmaYiZj) # sum over all j
427
+ C33 = np.sum(SigmaZiZj) # sum over all j
428
+ local_Cxx = np.array([[C11, C12, C13], [C12, C22, C23], [C13, C23, C33]])
429
+ else:
430
+ local_Cxx = np.zeros((3, 3))
431
+
432
+ C11p = (
433
+ (
434
+ tfM[0, 0] * cosyaw * sinscan
435
+ + tfM[0, 1] * sinyaw * sinscan # dX/dRange - measurements
436
+ + tfM[0, 2] * cosscan
437
+ )
438
+ ** 2
439
+ * sigmaRange**2
440
+ + (
441
+ -1 * tfM[0, 0] * rrange * sinyaw * sinscan
442
+ + tfM[0, 1] * rrange * cosyaw * sinscan # dX/dYaw
443
+ )
444
+ ** 2
445
+ * sigmaYaw**2
446
+ + (
447
+ tfM[0, 0] * rrange * cosyaw * cosscan
448
+ + tfM[0, 1] * rrange * sinyaw * cosscan # dX/dScan
449
+ + -1 * tfM[0, 2] * rrange * sinscan
450
+ )
451
+ ** 2
452
+ * sigmaScan**2
453
+ )
454
+
455
+ C12p = (
456
+ (
457
+ tfM[1, 0] * cosyaw * sinscan
458
+ + tfM[1, 1] * sinyaw * sinscan # dY/dRange - measurements
459
+ + tfM[1, 2] * cosscan
460
+ )
461
+ * (
462
+ tfM[0, 0] * cosyaw * sinscan
463
+ + tfM[0, 1] * sinyaw * sinscan # dX/dRange - measurements
464
+ + tfM[0, 2] * cosscan
465
+ )
466
+ * sigmaRange**2
467
+ + (
468
+ -1 * tfM[1, 0] * rrange * sinyaw * sinscan
469
+ + tfM[1, 1] * rrange * cosyaw * sinscan # dY/dYaw
470
+ )
471
+ * (
472
+ -1 * tfM[0, 0] * rrange * sinyaw * sinscan
473
+ + tfM[0, 1] * rrange * cosyaw * sinscan # dX/dYaw
474
+ )
475
+ * sigmaYaw**2
476
+ + (
477
+ tfM[0, 0] * rrange * cosyaw * cosscan
478
+ + tfM[0, 1] * rrange * sinyaw * cosscan # dX/dScan
479
+ + -1 * tfM[0, 2] * rrange * sinscan
480
+ )
481
+ * (
482
+ tfM[1, 0] * rrange * cosyaw * cosscan
483
+ + tfM[1, 1] * rrange * sinyaw * cosscan # dY/dScan
484
+ + -1 * tfM[1, 2] * rrange * sinscan
485
+ )
486
+ * sigmaScan**2
487
+ )
488
+
489
+ C22p = (
490
+ (
491
+ tfM[1, 0] * cosyaw * sinscan
492
+ + tfM[1, 1] * sinyaw * sinscan # dY/dRange - measurements
493
+ + tfM[1, 2] * cosscan
494
+ )
495
+ ** 2
496
+ * sigmaRange**2
497
+ + (
498
+ -1 * tfM[1, 0] * rrange * sinyaw * sinscan
499
+ + tfM[1, 1] * rrange * cosyaw * sinscan # dY/dYaw
500
+ )
501
+ ** 2
502
+ * sigmaYaw**2
503
+ + (
504
+ tfM[1, 0] * rrange * cosyaw * cosscan
505
+ + tfM[1, 1] * rrange * sinyaw * cosscan # dY/dScan
506
+ + -1 * tfM[1, 2] * rrange * sinscan
507
+ )
508
+ ** 2
509
+ * sigmaScan**2
510
+ )
511
+
512
+ C23p = (
513
+ (
514
+ tfM[1, 0] * cosyaw * sinscan
515
+ + tfM[1, 1] * sinyaw * sinscan # dY/dRange - measurements
516
+ + tfM[1, 2] * cosscan
517
+ )
518
+ * (
519
+ tfM[2, 0] * cosyaw * sinscan
520
+ + tfM[2, 1] * sinyaw * sinscan # dZ/dRange - measurements
521
+ + tfM[2, 2] * cosscan
522
+ )
523
+ * sigmaRange**2
524
+ + (
525
+ -1 * tfM[1, 0] * rrange * sinyaw * sinscan
526
+ + tfM[1, 1] * rrange * cosyaw * sinscan # dY/dYaw
527
+ )
528
+ * (
529
+ -1 * tfM[2, 0] * rrange * sinyaw * sinscan
530
+ + tfM[2, 1] * rrange * cosyaw * sinscan # dZ/dYaw
531
+ )
532
+ * sigmaYaw**2
533
+ + (
534
+ tfM[2, 0] * rrange * cosyaw * cosscan
535
+ + tfM[2, 1] * rrange * sinyaw * cosscan # dZ/dScan
536
+ + -1 * tfM[2, 2] * rrange * sinscan
537
+ )
538
+ * (
539
+ tfM[1, 0] * rrange * cosyaw * cosscan
540
+ + tfM[1, 1] * rrange * sinyaw * cosscan # dY/dScan
541
+ + -1 * tfM[1, 2] * rrange * sinscan
542
+ )
543
+ * sigmaScan**2
544
+ )
545
+
546
+ C33p = (
547
+ (
548
+ tfM[2, 0] * cosyaw * sinscan
549
+ + tfM[2, 1] * sinyaw * sinscan # dZ/dRange - measurements
550
+ + tfM[2, 2] * cosscan
551
+ )
552
+ ** 2
553
+ * sigmaRange**2
554
+ + (
555
+ -1 * tfM[2, 0] * rrange * sinyaw * sinscan
556
+ + tfM[2, 1] * rrange * cosyaw * sinscan # dZ/dYaw
557
+ )
558
+ ** 2
559
+ * sigmaYaw**2
560
+ + (
561
+ tfM[2, 0] * rrange * cosyaw * cosscan
562
+ + tfM[2, 1] * rrange * sinyaw * cosscan # dZ/dScan
563
+ + -1 * tfM[2, 2] * rrange * sinscan
564
+ )
565
+ ** 2
566
+ * sigmaScan**2
567
+ )
568
+
569
+ C13p = (
570
+ (
571
+ tfM[2, 0] * cosyaw * sinscan
572
+ + tfM[2, 1] * sinyaw * sinscan # dZ/dRange - measurements
573
+ + tfM[2, 2] * cosscan
574
+ )
575
+ * (
576
+ tfM[0, 0] * cosyaw * sinscan
577
+ + tfM[0, 1] * sinyaw * sinscan # dX/dRange - measurements
578
+ + tfM[0, 2] * cosscan
579
+ )
580
+ * sigmaRange**2
581
+ + (
582
+ -1 * tfM[2, 0] * rrange * sinyaw * sinscan
583
+ + tfM[2, 1] * rrange * cosyaw * sinscan # dZ/dYaw
584
+ )
585
+ * (
586
+ -1 * tfM[0, 0] * rrange * sinyaw * sinscan
587
+ + tfM[0, 1] * rrange * cosyaw * sinscan # dX/dYaw
588
+ )
589
+ * sigmaYaw**2
590
+ + (
591
+ tfM[2, 0] * rrange * cosyaw * cosscan
592
+ + tfM[2, 1] * rrange * sinyaw * cosscan # dZ/dScan
593
+ + -1 * tfM[2, 2] * rrange * sinscan
594
+ )
595
+ * (
596
+ tfM[0, 0] * rrange * cosyaw * cosscan
597
+ + tfM[0, 1] * rrange * sinyaw * cosscan # dX/dScan
598
+ + -1 * tfM[0, 2] * rrange * sinscan
599
+ )
600
+ * sigmaScan**2
601
+ )
602
+ local_Cxx[0, 0] += np.sum(C11p)
603
+ local_Cxx[0, 1] += np.sum(C12p)
604
+ local_Cxx[0, 2] += np.sum(C13p)
605
+ local_Cxx[1, 0] += np.sum(C12p)
606
+ local_Cxx[1, 1] += np.sum(C22p)
607
+ local_Cxx[1, 2] += np.sum(C23p)
608
+ local_Cxx[2, 1] += np.sum(C23p)
609
+ local_Cxx[2, 0] += np.sum(C13p)
610
+ local_Cxx[2, 2] += np.sum(C33p)
611
+
612
+ # Get the weighted mean without cross-point correlations (assumed to average out)
613
+ for pii in range(nPts):
614
+ Cxx = np.array(
615
+ [
616
+ [C11p[pii], C12p[pii], C13p[pii]],
617
+ [C12p[pii], C22p[pii], C23p[pii]],
618
+ [C13p[pii], C23p[pii], C33p[pii]],
619
+ ]
620
+ )
621
+ if np.linalg.det(Cxx) == 0:
622
+ Cxx = np.eye(3)
623
+ Cix = np.linalg.inv(Cxx)
624
+ ATP[:, pii * 3 : (pii + 1) * 3] = Cix
625
+ N = np.dot(ATP, A)
626
+ Qxx = np.linalg.inv(N) # can only have > 0 in main diagonal!
627
+ pts_m = curr_pts.mean(axis=0)
628
+ l = (curr_pts - pts_m).flatten()
629
+ mean = np.dot(Qxx, np.dot(ATP, l)) + pts_m
630
+
631
+ return mean, local_Cxx / nPts
632
+
633
+
634
+ def process_corepoint_list(
635
+ corepoints,
636
+ corepoint_normals,
637
+ p1_idx,
638
+ p1_shm_name,
639
+ p1_size,
640
+ p1_positions,
641
+ p2_idx,
642
+ p2_shm_name,
643
+ p2_size,
644
+ p2_positions,
645
+ M3C2Meta,
646
+ idx,
647
+ return_dict,
648
+ pbarQueue,
649
+ ):
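+ """Worker entry point (run in a separate process): attach to the shared-memory
+ coordinate arrays, compute the M3C2-EP distance, level of detection, spreads and
+ local covariance matrices for each core point of this block, store the results
+ under return_dict[idx] and report progress via pbarQueue."""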
650
+ pbarQueue.put((0, 1))
651
+ p1_shm = mp.shared_memory.SharedMemory(name=p1_shm_name)
652
+ p2_shm = mp.shared_memory.SharedMemory(name=p2_shm_name)
653
+ p1_coords = np.ndarray(p1_size, dtype=np.float64, buffer=p1_shm.buf)
654
+ p2_coords = np.ndarray(p2_size, dtype=np.float64, buffer=p2_shm.buf)
655
+
656
+ max_dist = M3C2Meta["maxdist"]
657
+ search_radius = M3C2Meta["searchrad"]
658
+
659
+ M3C2_vals = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
660
+ M3C2_LoD = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
661
+ M3C2_N1 = np.full((corepoints.shape[0]), np.nan, dtype=np.int32)
662
+ M3C2_N2 = np.full((corepoints.shape[0]), np.nan, dtype=np.int32)
663
+
664
+ M3C2_spread1 = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
665
+ M3C2_spread2 = np.full((corepoints.shape[0]), np.nan, dtype=np.float64)
666
+ M3C2_cov1 = np.full((corepoints.shape[0], 3, 3), np.nan, dtype=np.float64)
667
+ M3C2_cov2 = np.full((corepoints.shape[0], 3, 3), np.nan, dtype=np.float64)
668
+
669
+ for cp_idx, p1_neighbours in enumerate(p1_idx):
670
+ n = corepoint_normals[cp_idx]
671
+ p1_curr_pts = p1_coords[p1_neighbours, :]
672
+ along1, acrossSq1 = getAlongAcrossSqBatch(p1_curr_pts.T, corepoints[cp_idx], n)
673
+ p1_curr_pts = p1_curr_pts[
674
+ np.logical_and(np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2),
675
+ :,
676
+ ]
677
+ p1_scanPos = p1_positions[p1_neighbours]
678
+ p1_scanPos = p1_scanPos[
679
+ np.logical_and(np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2)
680
+ ]
681
+ if p1_curr_pts.shape[0] < M3C2Meta["minneigh"]:
682
+ pbarQueue.put((1, 0)) # point processed
683
+ M3C2_N1[cp_idx] = p1_curr_pts.shape[0]
684
+ continue
685
+ elif p1_curr_pts.shape[0] > M3C2Meta["maxneigh"]:
687
+ # keep the maxneigh neighbours closest to the cylinder axis; the sort indices
+ # must refer to the already-filtered points, hence the boolean mask
+ in_cyl1 = np.logical_and(np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2)
688
+ nearest1 = np.argsort(acrossSq1[in_cyl1])[: M3C2Meta["maxneigh"]]
+ p1_curr_pts = p1_curr_pts[nearest1]
+ p1_scanPos = p1_scanPos[nearest1]
688
+
689
+ Cxx = M3C2Meta["Cxx"]
690
+ tfM = M3C2Meta["tfM"]
691
+ origins = np.array([SP["origin"] for SP in M3C2Meta["spInfos"][0]])
692
+ redPoint = M3C2Meta["redPoint"]
693
+ sigmas = np.array(
694
+ [
695
+ [
696
+ SP["sigma_range"],
697
+ SP["sigma_range"],
698
+ SP["sigma_scan"],
699
+ SP["sigma_yaw"],
700
+ ]
701
+ for SP in M3C2Meta["spInfos"][0]
702
+ ]
703
+ )
704
+
705
+ p1_weighted_CoG, p1_local_Cxx = get_local_mean_and_Cxx_nocorr(
706
+ Cxx,
707
+ tfM,
708
+ origins,
709
+ redPoint,
710
+ sigmas,
711
+ p1_curr_pts,
712
+ p1_scanPos,
713
+ epoch=0,
714
+ tf=False,
715
+ ) # only one dataset has been transformed
716
+ along1_var = np.var(
717
+ along1[
718
+ np.logical_and(
719
+ np.abs(along1) <= max_dist, acrossSq1 <= search_radius**2
720
+ )
721
+ ]
722
+ )
723
+
724
+ p2_neighbours = p2_idx[cp_idx]
725
+ p2_curr_pts = p2_coords[p2_neighbours, :]
726
+ along2, acrossSq2 = getAlongAcrossSqBatch(p2_curr_pts.T, corepoints[cp_idx], n)
727
+ p2_curr_pts = p2_curr_pts[
728
+ np.logical_and(np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2),
729
+ :,
730
+ ]
731
+ p2_scanPos = p2_positions[p2_neighbours]
732
+ p2_scanPos = p2_scanPos[
733
+ np.logical_and(np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2)
734
+ ]
735
+ if p2_curr_pts.shape[0] < M3C2Meta["minneigh"]:
736
+ pbarQueue.put((1, 0)) # point processed
737
+ M3C2_N2[cp_idx] = p2_curr_pts.shape[0]
738
+ continue
739
+ elif p2_curr_pts.shape[0] > M3C2Meta["maxneigh"]:
740
+ # as for epoch 1: keep the maxneigh closest of the already-filtered points
+ in_cyl2 = np.logical_and(np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2)
741
+ nearest2 = np.argsort(acrossSq2[in_cyl2])[: M3C2Meta["maxneigh"]]
+ p2_curr_pts = p2_curr_pts[nearest2]
+ p2_scanPos = p2_scanPos[nearest2]
742
+
743
+ origins = np.array([SP["origin"] for SP in M3C2Meta["spInfos"][1]])
744
+ sigmas = np.array(
745
+ [
746
+ [
747
+ SP["sigma_range"],
748
+ SP["sigma_range"],
749
+ SP["sigma_scan"],
750
+ SP["sigma_yaw"],
751
+ ]
752
+ for SP in M3C2Meta["spInfos"][1]
753
+ ]
754
+ )
755
+ p2_weighted_CoG, p2_local_Cxx = get_local_mean_and_Cxx_nocorr(
756
+ Cxx,
757
+ tfM,
758
+ origins,
759
+ redPoint,
760
+ sigmas,
761
+ p2_curr_pts,
762
+ p2_scanPos,
763
+ epoch=1,
764
+ tf=True,
765
+ )
766
+ along2_var = np.var(
767
+ along2[
768
+ np.logical_and(
769
+ np.abs(along2) <= max_dist, acrossSq2 <= search_radius**2
770
+ )
771
+ ]
772
+ )
773
+
774
+ p1_CoG = p1_weighted_CoG
775
+ p2_CoG = p2_weighted_CoG
776
+
777
+ p1_CoG_Cxx = p1_local_Cxx
778
+ p2_CoG_Cxx = p2_local_Cxx
779
+
780
+ p1_p2_CoG_Cxx = np.zeros((6, 6))
781
+ p1_p2_CoG_Cxx[0:3, 0:3] = p1_CoG_Cxx
782
+ p1_p2_CoG_Cxx[3:6, 3:6] = p2_CoG_Cxx
783
+
784
+ M3C2_dist = n.dot(p1_CoG - p2_CoG)
785
+ F = np.hstack([-n, n])
786
+
787
+ M3C2_vals[cp_idx] = M3C2_dist
788
+
789
+ N1 = p1_curr_pts.shape[0]
790
+ N2 = p2_curr_pts.shape[0]
791
+
792
+ sigmaD = p1_CoG_Cxx + p2_CoG_Cxx
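+ # sigmaD: combined covariance of the two local means; the level of detection below
+ # follows from a chi-square test with 3 degrees of freedom at the 95 % level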
793
+
794
+ p = 3 # three dimensional
795
+ Tsqalt = n.T.dot(np.linalg.inv(sigmaD)).dot(n)
796
+
797
+ M3C2_LoD[cp_idx] = np.sqrt(sstats.chi2.ppf(0.95, p) / Tsqalt)
798
+ M3C2_N1[cp_idx] = N1
799
+ M3C2_N2[cp_idx] = N2
800
+
801
+ # add M3C2 spreads
802
+ normal = n[np.newaxis, :]
803
+ M3C2_spread1[cp_idx] = np.sqrt(
804
+ np.matmul(np.matmul(normal, p1_CoG_Cxx), normal.T)
805
+ )
806
+ M3C2_spread2[cp_idx] = np.sqrt(
807
+ np.matmul(np.matmul(normal, p2_CoG_Cxx), normal.T)
808
+ )
809
+ M3C2_cov1[cp_idx] = p1_CoG_Cxx
810
+ M3C2_cov2[cp_idx] = p2_CoG_Cxx
811
+
812
+ pbarQueue.put((1, 0)) # point processed
813
+
814
+ return_dict[idx] = {
815
+ "lod_new": M3C2_LoD,
816
+ "val": M3C2_vals,
817
+ "m3c2_n1": M3C2_N1,
818
+ "m3c2_n2": M3C2_N2,
819
+ "m3c2_spread1": M3C2_spread1,
820
+ "m3c2_spread2": M3C2_spread2,
821
+ "m3c2_cov1": M3C2_cov1,
822
+ "m3c2_cov2": M3C2_cov2,
823
+ }
824
+ pbarQueue.put((0, -1))
825
+ p1_shm.close()
826
+ p2_shm.close()
827
+
828
+
829
+ def radius_search(epoch: Epoch, query: np.ndarray, radius: float):
830
+ """Query the tree for neighbors within a radius r
831
+ :param query:
832
+ An array of points to query.
833
+ Array-like of shape (n_samples, 3) or query 1 sample point of shape (3,)
834
+ :type query: array
835
+ :param radius:
836
+ Rebuild the search tree even if it was already built before.
837
+ :type radius: float
838
+ """
839
+ if len(query.shape) == 1 and query.shape[0] == 3:
840
+ return [epoch.kdtree.radius_search(query, radius)]
841
+
842
+ if len(query.shape) == 2 and query.shape[1] == 3:
843
+ neighbors = []
844
+ for i in range(query.shape[0]):
845
+ q = query[i]
846
+ result = epoch.kdtree.radius_search(q, radius)
847
+ neighbors.append(result)
848
+ return neighbors
849
+
850
+ raise Py4DGeoError(
851
+ "Please ensure queries are array-like of shape (n_samples, 3)"
852
+ " or of shape (3,) to query 1 sample point!"
853
+ )
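+
+ # Hypothetical usage (illustrative names only): for an Epoch with a built kdtree,
+ #     idx_lists = radius_search(epoch, np.array([[0.0, 0.0, 0.0]]), 1.0)
+ # returns a list with one index array per query point.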