libxrk 0.6.0__cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
libxrk/gps.py ADDED
@@ -0,0 +1,526 @@
+ # Copyright 2024, Scott Smith. MIT License (see LICENSE).
+
+ from collections import namedtuple
+ from typing import TYPE_CHECKING
+
+ import numpy as np
+ import pyarrow as pa
+
+ if TYPE_CHECKING:
+     from .base import LogFile
+
+ # GPS channel names that share a common timebase and may need timing correction
+ GPS_CHANNEL_NAMES = ("GPS Speed", "GPS Latitude", "GPS Longitude", "GPS Altitude")
+
+ # None of the algorithms are slow
+ # fastest: Fukushima 2006, but worse accuracy
+ # most accurate: Vermeille, also very compact code
+ # runner up: Osen
+
+
+ # Also considered:
+
+ # https://ea4eoz.blogspot.com/2015/11/simple-wgs-84-ecef-conversion-functions.html
+ # slower algorithm, not great accuracy
+
+ # http://wiki.gis.com/wiki/index.php/Geodetic_system
+ # poor height accuracy
+
+ # Olson, D. K., Converting Earth-Centered, Earth-Fixed Coordinates to
+ # Geodetic Coordinates, IEEE Transactions on Aerospace and Electronic
+ # Systems, 32 (1996) 473-476.
+ # difficult to vectorize (poor for numpy/python)
+
+ GPS = namedtuple("GPS", ["lat", "long", "alt"])
+
+
+ # convert lat/long/zoom to web mercator. lat/long are degrees
+ # returns x,y as floats - integer component is which tile to download
+ def llz2web(lat, long, zoom=0):
+     # wikipedia web mercator projection
+     mult = 0.25 * (2 << zoom)
+     return (
+         mult * (1 + long / 180),
+         mult * (1 - np.log(np.tan(np.pi / 4 + np.pi / 360 * lat)) / np.pi),
+     )
+
+
+ # returns lat/long as floats in degrees
+ def web2ll(x, y, zoom=0):
+     mult = 1 / (0.25 * (2 << zoom))
+     return (
+         np.arctan(np.exp(np.pi - np.multiply(np.pi * mult, y))) * 360 / np.pi - 90,
+         np.multiply(180 * mult, x) - 180,
+     )
+
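As an illustrative usage sketch (not part of the package), the integer parts of llz2web's result select the slippy-map tile to download and the fractional parts give the position inside that tile; web2ll inverts the mapping. The coordinates below are arbitrary example values.

    from libxrk.gps import llz2web, web2ll

    x, y = llz2web(48.8583, 2.2945, zoom=17)   # arbitrary lat/long, degrees
    tile_x, tile_y = int(x), int(y)            # integer part: tile indices to fetch
    frac_x, frac_y = x - tile_x, y - tile_y    # fractional part: position within that tile
    lat, long = web2ll(x, y, zoom=17)          # round-trips back to roughly (48.8583, 2.2945)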
+
+ # lat, long = degrees
+ # x, y, z, alt = meters
+ def lla2ecef(lat, lon, alt):
+     a = 6378137
+     e = 8.181919084261345e-2
+     e_sq = e * e
+
+     lat = lat * (np.pi / 180)
+     lon = lon * (np.pi / 180)
+
+     clat = np.cos(lat)
+     slat = np.sin(lat)
+
+     N = a / np.sqrt(1 - e_sq * slat * slat)
+
+     x = (N + alt) * clat * np.cos(lon)
+     y = (N + alt) * clat * np.sin(lon)
+     z = ((1 - e_sq) * N + alt) * slat
+
+     return x, y, z
+
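A quick sanity check of lla2ecef (illustrative, not from the package): latitude 0, longitude 0 at zero altitude must land on the X axis, one WGS-84 semi-major axis from the geocenter.

    from libxrk.gps import lla2ecef

    x, y, z = lla2ecef(0.0, 0.0, 0.0)
    assert abs(x - 6378137.0) < 1e-6 and abs(y) < 1e-6 and abs(z) < 1e-6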
+
+ # Karl Osen. Accurate Conversion of Earth-Fixed Earth-Centered Coordinates to Geodetic Coordinates.
+ # [Research Report] Norwegian University of Science and Technology. 2017. hal-01704943v2
+ # https://hal.science/hal-01704943v2/document
+ # pretty accurate, reasonably fast
+ def ecef2lla_osen(x, y, z):
+     invaa = +2.45817225764733181057e-0014  # 1/(a^2)
+     l = +3.34718999507065852867e-0003  # (e^2)/2
+     p1mee = +9.93305620009858682943e-0001  # 1-(e^2)
+     p1meedaa = +2.44171631847341700642e-0014  # (1-(e^2))/(a^2)
+     ll4 = +4.48147234524044602618e-0005  # 4*(l^2) = e^4
+     ll = +1.12036808631011150655e-0005  # l^2 = (e^4)/4
+     invcbrt2 = +7.93700525984099737380e-0001  # 1/(2^(1/3))
+     inv3 = +3.33333333333333333333e-0001  # 1/3
+     inv6 = +1.66666666666666666667e-0001  # 1/6
+
+     w = x * x + y * y
+     m = w * invaa
+     w = np.sqrt(w)
+     n = z * z * p1meedaa
+     mpn = m + n
+     p = inv6 * (mpn - ll4)
+     P = p * p
+     G = m * n * ll
+     H = 2 * P * p + G
+     p = None
+     # if H < Hmin: return -1
+     C = np.cbrt(H + G + 2 * np.sqrt(H * G)) * invcbrt2
+     G = None
+     H = None
+     i = -ll - 0.5 * mpn
+     beta = inv3 * i - C - P / C
+     C = None
+     P = None
+     k = ll * (ll - mpn)
+     mpn = None
+     # Compute t
+     t = np.sqrt(np.sqrt(beta * beta - k) - 0.5 * (beta + i)) + np.sqrt(np.abs(0.5 * (beta - i))) * (
+         2 * (m < n) - 1
+     )
+     beta = None
+     # Use the Newton-Raphson method to compute the t correction
+     g = 2 * l * (m - n)
+     m = None
+     n = None
+     tt = t * t
+     dt = -(tt * (tt + (i + i)) + g * t + k) / (4 * t * (tt + i) + g)
+     g = None
+     i = None
+     tt = None
+     # compute latitude (range -PI/2..PI/2)
+     u = t + dt + l
+     v = t + dt - l
+     dt = None
+     zu = z * u
+     wv = w * v
+     # compute altitude
+     invuv = 1 / (u * v)
+     return GPS(
+         np.arctan2(zu, wv) * (180 / np.pi),
+         np.arctan2(y, x) * (180 / np.pi),
+         np.sqrt(np.square(w - wv * invuv) + np.square(z - zu * p1mee * invuv)) * (1 - 2 * (u < 1)),
+     )
+
+
+ # https://www.researchgate.net/publication/227215135_Transformation_from_Cartesian_to_Geodetic_Coordinates_Accelerated_by_Halley's_Method/link/0912f50af90e6de252000000/download
+ # "Fukushima 2006"
+ # fastest, reasonably accurate but not best
+ def ecef2lla_fukushima2006(x, y, z):
+     a = 6378137.0
+     finv = 298.257222101
+     f = 1.0 / finv
+     e2 = (2 - f) * f
+     ec2 = 1 - e2
+     ec = np.sqrt(ec2)
+     # b = a * ec
+     c = a * e2
+     # PIH = 2 * np.arctan(1.)
+
+     lamb = np.arctan2(y, x)
+     s0 = np.abs(z)
+     p2 = x * x + y * y
+     p = np.sqrt(p2)
+     zc = ec * s0
+     c0 = ec * p
+     c02 = c0 * c0
+     s02 = s0 * s0
+     a02 = c02 + s02
+     a0 = np.sqrt(a02)
+     # a03 = a02 * a0
+     a03 = a02
+     a03 *= a0
+     a02 = None
+     # s1 = zc * a03 + c * (s02 * s0)
+     s02 *= s0
+     s02 *= c
+     s1 = s02
+     s1 += zc * a03
+     s02 = None
+     # c1 = p * a03 - c * (c02 * c0)
+     c02 *= c0
+     c02 *= c
+     c1 = p * a03 - c02
+     c02 = None
+     cs0c0 = c * c0 * s0
+     # b0 = 1.5 * cs0c0 * ((p*s0 - zc*c0) * a0 - cs0c0)
+     zc *= c0
+     b0 = cs0c0
+     b0 *= 1.5 * ((p * s0 - zc) * a0 - cs0c0)
+     a0 = None
+     zc = None
+     cs0c0 = None
+     s1 = s1 * a03 - b0 * s0
+     # cc = ec * (c1 * a03 - b0 * c0)
+     c1 *= a03
+     b0 *= c0
+     c1 -= b0
+     cc = c1
+     cc *= ec
+     c1 = None
+     a03 = None
+     c0 = None
+     b0 = None
+     phi = np.arctan2(s1, cc)
+     s12 = s1 * s1
+     cc2 = cc * cc
+     h = (p * cc + s0 * s1 - a * np.sqrt(ec2 * s12 + cc2)) / np.sqrt(s12 + cc2)
+     s1 = None
+     cc = None
+     s12 = None
+     cc2 = None
+     phi = np.copysign(phi, z)
+
+     return GPS(phi * (180 / np.pi), lamb * (180 / np.pi), h)
+
+
+ # Computing geodetic coordinates from geocentric coordinates
+ # H. Vermeille, 2003/2004
+ # http://users.auth.gr/kvek/78_Vermeille.pdf
+ def ecef2lla_vermeille2003(x, y, z):
+     a = 6378137.0
+     e = 8.181919084261345e-2
+
+     p = (x * x + y * y) * (1 / (a * a))
+     q = ((1 - e * e) / (a * a)) * z * z
+     r = (p + q - e**4) * (1 / 6)
+     s = (e**4 / 4) * p * q / (r**3)
+     p = None
+     t = np.cbrt(1 + s + np.sqrt(s * (2 + s)))
+     s = None
+     u = r * (1 + t + 1 / t)
+     r = None
+     t = None
+     v = np.sqrt(u * u + e**4 * q)
+     u += v  # precalc
+     w = (e**2 / 2) * (u - q) / v
+     q = None
+     k = np.sqrt(u + w * w) - w
+     D = k * np.sqrt(x * x + y * y) / (k + e**2)
+     rtDDzz = np.sqrt(D * D + z * z)
+     return GPS(
+         (180 / np.pi) * 2 * np.arctan2(z, D + rtDDzz),
+         (180 / np.pi) * np.arctan2(y, x),
+         (k + e**2 - 1) / k * rtDDzz,
+     )
+
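A round-trip sketch tying the two directions together (arbitrary test point and assumed tolerances): geodetic to ECEF and back with the Vermeille solver should reproduce the inputs to well under a millimeter.

    from libxrk.gps import lla2ecef, ecef2lla_vermeille2003

    lat0, long0, alt0 = 46.5, 6.6, 372.0        # degrees, degrees, meters
    x, y, z = lla2ecef(lat0, long0, alt0)
    back = ecef2lla_vermeille2003(x, y, z)      # GPS(lat, long, alt) namedtuple
    assert abs(back.lat - lat0) < 1e-9          # degrees, roughly 0.1 mm on the ground
    assert abs(back.long - long0) < 1e-9
    assert abs(back.alt - alt0) < 1e-6          # meters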
+
+ def find_crossing_idx(
+     XYZ: np.ndarray, marker: np.ndarray  # coordinates to look up in (X, Y, Z), meters
+ ):  # (lat, long), degrees
+
+     if isinstance(marker, tuple):
+         marker = np.array(marker)
+     if len(marker.shape) == 1:
+         # force it to be a 2d shape to make the rest of the code simpler
+         return find_crossing_idx(XYZ, marker.reshape((1, len(marker))))[0]
+
+     # very similar to gps lap insert, but we can assume XYZ is a
+     # reference (as opposed to GPS lap insert where we aren't sure if
+     # the trajectory of the GPS is correct or not - think pit stops,
+     # going off track, etc). As a result, only one pass is needed.
+     # Also we do not need to filter based on minspeed, so no timecodes
+     # are used in this function.
+
+     lat = marker[:, 0].reshape((len(marker), 1))
+     lon = marker[:, 1].reshape((len(marker), 1))
+     SO = np.stack(lla2ecef(lat, lon, 0), axis=2)
+     SD = np.stack(lla2ecef(lat, lon, 1000), axis=2) - SO
+
+     O = XYZ[:, :3]
+     D = O[1:] - O[:-1]
+     O = O[:-1] - SO
+
+     SN = np.sum(SD * SD, axis=2, keepdims=True) * D - np.sum(SD * D, axis=2, keepdims=True) * SD
+     t = np.clip(
+         -np.sum(SN * O, axis=2, keepdims=True) / np.sum(SN * D, axis=2, keepdims=True),
+         0,
+         1,
+     )
+
+     # XXX This won't work with rally stages (anything not a circuit)
+     distsq = np.sum(np.square(O + t * D), axis=2)
+     minidx = np.argmin(distsq, axis=1)
+     colrange = np.arange(t.shape[0])
+     return np.column_stack([minidx + t[colrange, minidx, 0], np.sqrt(distsq[colrange, minidx])])
+
+
+ def find_crossing_dist(
+     XYZD: np.ndarray,  # coordinates to look up in (X, Y, Z, Distance), meters
+     marker: tuple[float, float],
+ ):  # (lat, long) tuple, degrees
+     idx, _ = find_crossing_idx(XYZD, np.array(marker))
+     if idx + 1 >= len(XYZD):
+         return XYZD[int(idx), 3]
+     scale, idx = np.modf(idx)
+     return XYZD[int(idx), 3] + scale * (XYZD[int(idx) + 1, 3] - XYZD[int(idx), 3])
+
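An illustrative exercise of the two crossing helpers on a synthetic straight-line track (all values invented for the sketch): the marker lies on the path, so the reported miss distance is near zero and the returned crossing distance is roughly the halfway point.

    import numpy as np
    from libxrk.gps import lla2ecef, find_crossing_idx, find_crossing_dist

    lons = 9.0 + np.linspace(-0.001, 0.001, 21)     # ~160 m of travel at this latitude
    lats = np.full_like(lons, 45.0)
    XYZ = np.column_stack(lla2ecef(lats, lons, np.zeros_like(lons)))
    dist_m = np.concatenate([[0.0], np.cumsum(np.linalg.norm(np.diff(XYZ, axis=0), axis=1))])
    idx, miss = find_crossing_idx(XYZ, (45.0, 9.0))                  # fractional index, meters
    sf_dist = find_crossing_dist(np.column_stack([XYZ, dist_m]), (45.0, 9.0))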
+
+ def find_laps(
+     XYZ: np.ndarray,  # coordinates to look up in (X, Y, Z), meters
+     timecodes: np.ndarray,  # time for above coordinates, ms
+     marker: tuple[float, float],
+ ):  # (lat, long) tuple, degrees
+     # gps lap insert. We assume the start finish "line" is a
+     # plane containing the vector that goes through the GPS
+     # coordinates sf lat/long from altitude 0 to 1000. The normal
+     # of the plane is generally in line with the direction of
+     # travel, given the above constraint.
+
+     # O, D = vehicle vector (O=origin, D=direction, [0]=O, [1]=O+D)
+     # SO, SD = start finish origin, direction (plane must contain SO and SO+SD points)
+     # SN = start finish plane normal
+
+     # D = a*SD + SN
+     # 0 = SD . SN
+     # combine to get: 0 = SD . (D - a*SD)
+     # a * (SD . SD) = SD . D
+     # plug back into first eq:
+     # SN = D - (SD . D) / (SD . SD) * SD
+     # or to avoid division, and because length doesn't matter:
+     # SN = (SD . SD) * D - (SD . D) * SD
+
+     # now determine intersection with plane SO,SN from vector O,O+D:
+     # SN . (O + tD - SO) = 0
+     # t * (D . SN) + SN . (O - SO) = 0
+     # t = -SN.(O-SO) / D.SN
+
+     SO = np.array(lla2ecef(*marker, 0.0)).reshape((1, 3))
+     SD = np.array(lla2ecef(*marker, 1000)).reshape((1, 3)) - SO
+
+     O = XYZ - SO
+
+     D = O[1:] - O[:-1]
+     O = O[:-1]
+
+     # VBox seems to need 30, maybe my friend is using an old map description
+     marker_size = 30  # meters, how far you can be from the marker to count as a lap
+
+     # Precalculate in which time periods we were traveling at least 4 m/s (~10mph)
+     minspeed = np.sum(D * D, axis=1) > np.square((timecodes[1:] - timecodes[:-1]) * (4 / 1000))
+
+     SN = (
+         np.sum(SD * SD, axis=1).reshape((len(SD), 1)) * D
+         - np.sum(SD * D, axis=1).reshape((len(D), 1)) * SD
+     )
+     t = np.maximum(-np.sum(SN * O, axis=1) / np.sum(SN * D, axis=1), 0)
+     # This only works because the track is considered at altitude 0
+     dist = np.sum(np.square(O + t.reshape((len(t), 1)) * D), axis=1)
+     pick = (t[1:] <= 1) & (t[:-1] > 1) & (dist[1:] < marker_size**2)
+
+     # Now that we have a decent candidate selection of lap
+     # crossings, generate a single normal vector for the
+     # start/finish line to use for all lap crossings, to make the
+     # lap times more accurate/consistent. Weight the crossings by
+     # velocity and add them together. As it happens, SN is
+     # already weighted by velocity...
+     SN = np.sum(SN[1:][pick & minspeed[1:]], axis=0).reshape((1, 3))
+     # recompute t, dist, pick
+     t = np.maximum(-np.sum(SN * O, axis=1) / np.sum(SN * D, axis=1), 0)
+     dist = np.sum(np.square(O + t.reshape((len(t), 1)) * D), axis=1)
+     pick = (t[1:] <= 1) & (t[:-1] > 1) & (dist[1:] < marker_size**2)
+
+     lap_markers = [0]
+     for idx in np.nonzero(pick)[0] + 1:
+         if timecodes[idx] <= lap_markers[-1]:
+             continue
+         if not minspeed[idx]:
+             idx = np.argmax(minspeed[idx:]) + idx
+         lap_markers.append(timecodes[idx] + t[idx] * (timecodes[idx + 1] - timecodes[idx]))
+     return lap_markers[1:]
+
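A self-contained sketch of how find_laps might be exercised (synthetic data, not from the package): a point circling a small loop once per minute at 25 Hz, with the start/finish marker placed on the loop, should produce crossings roughly 60 000 ms apart.

    import numpy as np
    from libxrk.gps import lla2ecef, find_laps

    t_ms = np.arange(0, 120_000, 40)            # two minutes of 25 Hz samples
    ang = 2 * np.pi * t_ms / 60_000 + np.pi     # one loop per 60 s, starting opposite the marker
    lat = 45.0 + 0.002 * np.sin(ang)
    lon = 9.0 + 0.002 * np.cos(ang)
    xyz = np.column_stack(lla2ecef(lat, lon, np.zeros_like(lat)))
    crossings = find_laps(xyz, t_ms, (45.0, 9.002))   # marker on the loop at ang == 0
    lap_times_ms = np.diff(crossings)                 # roughly [60000.]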
+
+ ecef2lla = ecef2lla_vermeille2003
+
+ if __name__ == "__main__":
+
+     def perf_test():
+         import time
+
+         samples = 10000000
+         lat = -90.0 + 180.0 * np.random.rand(samples, 1)
+         long = -180.0 + 360.0 * np.random.rand(samples, 1)
+         alt = -11e3 + (20e3) * np.random.rand(
+             samples, 1
+         )  # From approximately the bottom of the Mariana Trench to the top of Mount Everest
+
+         print("generating x,y,z")
+         x, y, z = lla2ecef(lat, long, alt)
+         algos = [
+             ("osen", ecef2lla_osen),
+             ("fukushima2006", ecef2lla_fukushima2006),
+             ("vermeille2003", ecef2lla_vermeille2003),
+         ]
+         stats: dict[str, list[float]] = {name: [] for name, algo in algos}
+         for _ in range(5):
+             for name, algo in algos:
+                 start = time.time()
+                 ilat, ilong, ialt = algo(x, y, z)
+                 duration = time.time() - start
+                 stats[name].append(duration)
+                 print("algorithm %s took %.3f" % (name, duration))
+                 print(
+                     " avg",
+                     np.sqrt(np.sum((ilat - lat) ** 2)) / len(ilat),
+                     np.sqrt(np.sum((ilong - long) ** 2)) / len(ilong),
+                     np.sqrt(np.sum((ialt - alt) ** 2)) / len(ialt),
+                 )
+                 print(
+                     " max",
+                     np.max(np.abs(ilat - lat)),
+                     np.max(np.abs(ilong - long)),
+                     np.max(np.abs(ialt - alt)),
+                 )
+         for name, stat in stats.items():
+             print(name, ", ".join(["%.3f" % s for s in stat]))
+
+     perf_test()
+
+
+ def fix_gps_timing_gaps(log: "LogFile", expected_dt_ms: float = 40.0) -> "LogFile":
+     """Detect and correct large timing gaps in GPS channels and lap boundaries.
+
+     Some AIM data loggers produce GPS data with spurious timestamp jumps
+     (e.g., 65533ms gaps that should be ~40ms). This is likely caused by a
+     16-bit overflow bug in the logger firmware. This function detects such
+     gaps and corrects the timecodes by removing the excess time.
+
+     The fix is applied in-place to the LogFile's channels dict and laps table.
+
+     Parameters
+     ----------
+     log : LogFile
+         The loaded log file with channels dict.
+     expected_dt_ms : float, default=40.0
+         Expected time delta between GPS samples in milliseconds.
+         Default is 40ms (25 Hz GPS).
+
+     Returns
+     -------
+     LogFile
+         The same LogFile object with corrected GPS timecodes and lap boundaries.
+     """
+     # Find the first GPS channel that exists
+     gps_channel_name = None
+     for name in GPS_CHANNEL_NAMES:
+         if name in log.channels:
+             gps_channel_name = name
+             break
+
+     if gps_channel_name is None:
+         return log
+
+     # Get the GPS timecodes
+     gps_table = log.channels[gps_channel_name]
+     gps_time = gps_table.column("timecodes").to_numpy()
+
+     if len(gps_time) < 2:
+         return log
+
+     # Detect gaps
+     dt = np.diff(gps_time)
+     gap_threshold = expected_dt_ms * 10  # 400ms default
+
+     # Find indices where gaps are too large
+     gap_indices = np.where(dt > gap_threshold)[0]
+
+     if len(gap_indices) == 0:
+         return log
+
+     # Build list of (gap_time, correction) pairs
+     gap_corrections = []
+     for gap_idx in gap_indices:
+         gap_time = gps_time[gap_idx]
+         gap_size = dt[gap_idx]
+         correction = gap_size - expected_dt_ms
+         gap_corrections.append((gap_time, correction))
+
+     # Fix GPS channel timecodes
+     gps_time_fixed = gps_time.astype(np.float64)
+     for gap_time, correction in gap_corrections:
+         gps_time_fixed[gps_time > gap_time] -= correction
+
+     gps_time_fixed = gps_time_fixed.astype(np.int64)
+
+     # Update all GPS channels with corrected timecodes
+     for name in GPS_CHANNEL_NAMES:
+         if name not in log.channels:
+             continue
+
+         table = log.channels[name]
+         value_column = table.column(name)
+         field = table.schema.field(name)
+         metadata = field.metadata
+
+         new_table = pa.table(
+             {
+                 "timecodes": pa.array(gps_time_fixed, type=pa.int64()),
+                 name: value_column,
+             }
+         )
+
+         if metadata:
+             new_field = new_table.schema.field(name).with_metadata(metadata)
+             new_schema = pa.schema([new_table.schema.field("timecodes"), new_field])
+             new_table = new_table.cast(new_schema)
+
+         log.channels[name] = new_table
+
+     # Fix lap boundaries (start_time, end_time)
+     if log.laps is not None and len(log.laps) > 0:
+         start_times = log.laps.column("start_time").to_numpy().astype(np.float64)
+         end_times = log.laps.column("end_time").to_numpy().astype(np.float64)
+
+         for gap_time, correction in gap_corrections:
+             start_times[start_times > gap_time] -= correction
+             end_times[end_times > gap_time] -= correction
+
+         new_laps_data = {}
+         for col_name in log.laps.column_names:
+             if col_name == "start_time":
+                 new_laps_data[col_name] = pa.array(start_times.astype(np.int64), type=pa.int64())
+             elif col_name == "end_time":
+                 new_laps_data[col_name] = pa.array(end_times.astype(np.int64), type=pa.int64())
+             else:
+                 new_laps_data[col_name] = log.laps.column(col_name)
+
+         log.laps = pa.table(new_laps_data)
+
+     return log
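To make the correction concrete, here is a minimal sketch of the same gap-removal arithmetic on a bare timecode array (synthetic values; the function above operates on the LogFile's pyarrow channel tables and laps table):

    import numpy as np

    expected_dt_ms = 40.0
    timecodes = np.array([0, 40, 80, 65613, 65653, 65693], dtype=np.int64)  # 65533 ms jump after 80
    dt = np.diff(timecodes)
    fixed = timecodes.astype(np.float64)
    for gap_idx in np.where(dt > expected_dt_ms * 10)[0]:
        fixed[timecodes > timecodes[gap_idx]] -= dt[gap_idx] - expected_dt_ms
    print(fixed.astype(np.int64))   # 0, 40, 80, 120, 160, 200 -- the gap collapses to one 40 ms step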
libxrk/py.typed ADDED
File without changes