nrl-tracker 0.21.1__py3-none-any.whl → 0.22.5__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (35)
  1. {nrl_tracker-0.21.1.dist-info → nrl_tracker-0.22.5.dist-info}/METADATA +4 -4
  2. {nrl_tracker-0.21.1.dist-info → nrl_tracker-0.22.5.dist-info}/RECORD +35 -33
  3. pytcl/__init__.py +1 -1
  4. pytcl/assignment_algorithms/data_association.py +2 -7
  5. pytcl/assignment_algorithms/jpda.py +43 -29
  6. pytcl/assignment_algorithms/two_dimensional/assignment.py +14 -7
  7. pytcl/astronomical/__init__.py +60 -7
  8. pytcl/astronomical/ephemerides.py +530 -0
  9. pytcl/astronomical/relativity.py +472 -0
  10. pytcl/atmosphere/__init__.py +2 -2
  11. pytcl/clustering/dbscan.py +23 -5
  12. pytcl/clustering/hierarchical.py +23 -10
  13. pytcl/clustering/kmeans.py +5 -10
  14. pytcl/containers/__init__.py +4 -21
  15. pytcl/containers/cluster_set.py +1 -10
  16. pytcl/containers/measurement_set.py +1 -9
  17. pytcl/coordinate_systems/projections/__init__.py +4 -2
  18. pytcl/dynamic_estimation/imm.py +42 -36
  19. pytcl/dynamic_estimation/kalman/extended.py +1 -4
  20. pytcl/dynamic_estimation/kalman/linear.py +17 -13
  21. pytcl/dynamic_estimation/kalman/unscented.py +27 -27
  22. pytcl/dynamic_estimation/particle_filters/bootstrap.py +57 -19
  23. pytcl/dynamic_estimation/smoothers.py +1 -5
  24. pytcl/dynamic_models/discrete_time/__init__.py +1 -5
  25. pytcl/dynamic_models/process_noise/__init__.py +1 -5
  26. pytcl/magnetism/__init__.py +3 -14
  27. pytcl/mathematical_functions/interpolation/__init__.py +2 -2
  28. pytcl/mathematical_functions/special_functions/__init__.py +2 -2
  29. pytcl/navigation/__init__.py +14 -10
  30. pytcl/navigation/ins.py +1 -5
  31. pytcl/trackers/__init__.py +3 -14
  32. pytcl/trackers/multi_target.py +1 -4
  33. {nrl_tracker-0.21.1.dist-info → nrl_tracker-0.22.5.dist-info}/LICENSE +0 -0
  34. {nrl_tracker-0.21.1.dist-info → nrl_tracker-0.22.5.dist-info}/WHEEL +0 -0
  35. {nrl_tracker-0.21.1.dist-info → nrl_tracker-0.22.5.dist-info}/top_level.txt +0 -0
pytcl/coordinate_systems/projections/__init__.py CHANGED
@@ -26,8 +26,10 @@ Examples
  ... result.zone, result.hemisphere)
  """
 
- from pytcl.coordinate_systems.projections.projections import (  # Constants; Result types; Azimuthal Equidistant; UTM; Lambert Conformal Conic; Mercator; Stereographic; Transverse Mercator
-     WGS84_A,
+ from pytcl.coordinate_systems.projections.projections import (
+     WGS84_A,  # Constants; Result types; Azimuthal Equidistant; UTM; Lambert Conformal Conic; Mercator; Stereographic; Transverse Mercator
+ )
+ from pytcl.coordinate_systems.projections.projections import (
      WGS84_B,
      WGS84_E,
      WGS84_E2,
pytcl/dynamic_estimation/imm.py CHANGED
@@ -17,10 +17,7 @@ from typing import List, NamedTuple, Optional
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
 
- from pytcl.dynamic_estimation.kalman.linear import (
-     kf_predict,
-     kf_update,
- )
+ from pytcl.dynamic_estimation.kalman.linear import kf_predict, kf_update
 
 
  class IMMState(NamedTuple):
@@ -131,15 +128,16 @@ def compute_mixing_probabilities(
      # Predicted mode probabilities: c_bar[j] = sum_i Pi[i,j] * mu[i]
      c_bar = Pi.T @ mode_probs
 
-     # Mixing probabilities: mu[i|j] = Pi[i,j] * mu[i] / c_bar[j]
-     mixing_probs = np.zeros((r, r))
-     for j in range(r):
-         if c_bar[j] > 1e-15:
-             for i in range(r):
-                 mixing_probs[i, j] = Pi[i, j] * mode_probs[i] / c_bar[j]
-         else:
-             # Uniform if predicted probability is zero
-             mixing_probs[:, j] = 1.0 / r
+     # Mixing probabilities: mu[i|j] = Pi[i,j] * mu[i] / c_bar[j] (vectorized)
+     # Compute numerator: Pi[i,j] * mu[i] for all i,j
+     numerator = Pi * mode_probs[:, np.newaxis]
+     # Divide by c_bar (with safe division for near-zero values)
+     safe_c_bar = np.where(c_bar > 1e-15, c_bar, 1.0)
+     mixing_probs = numerator / safe_c_bar
+     # Set uniform for columns where c_bar was too small
+     zero_mask = c_bar <= 1e-15
+     if np.any(zero_mask):
+         mixing_probs[:, zero_mask] = 1.0 / r
 
      return mixing_probs, c_bar
 
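The vectorized mixing step divides the elementwise product Pi[i, j] * mu[i] by the column sums c_bar. A standalone sketch with an illustrative 3-mode transition matrix (all names local to this example) confirms each column of the result is a valid distribution over modes:

```python
import numpy as np

# Hypothetical 3-mode Markov chain: Pi[i, j] is the i -> j switching
# probability, mu holds the current mode probabilities.
Pi = np.array([[0.90, 0.05, 0.05],
               [0.10, 0.80, 0.10],
               [0.05, 0.15, 0.80]])
mu = np.array([0.5, 0.3, 0.2])

c_bar = Pi.T @ mu                                  # predicted mode probabilities
numerator = Pi * mu[:, np.newaxis]                 # Pi[i, j] * mu[i]
mixing = numerator / np.where(c_bar > 1e-15, c_bar, 1.0)

# Each column of the mixing matrix is a probability distribution over modes
assert np.allclose(mixing.sum(axis=0), 1.0)
```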
@@ -169,23 +167,30 @@ def mix_states(
          Mixed covariances for each mode.
      """
      r = len(mode_states)
-     n = len(mode_states[0])
+
+     # Stack states and covariances for vectorized operations
+     states_array = np.array(mode_states)  # shape (r, n)
+     covs_array = np.array(mode_covs)  # shape (r, n, n)
 
      mixed_states = []
      mixed_covs = []
 
      for j in range(r):
-         # Mixed state: x_0j = sum_i mu[i|j] * x_i
-         x_mixed = np.zeros(n)
-         for i in range(r):
-             x_mixed += mixing_probs[i, j] * mode_states[i]
-         mixed_states.append(x_mixed)
+         # Mixed state: x_0j = sum_i mu[i|j] * x_i (vectorized)
+         x_mixed = mixing_probs[:, j] @ states_array
 
          # Mixed covariance: P_0j = sum_i mu[i|j] * (P_i + (x_i - x_0j)(x_i - x_0j)^T)
-         P_mixed = np.zeros((n, n))
-         for i in range(r):
-             diff = mode_states[i] - x_mixed
-             P_mixed += mixing_probs[i, j] * (mode_covs[i] + np.outer(diff, diff))
+         # Compute differences for all modes at once
+         diffs = states_array - x_mixed  # shape (r, n)
+         # Weighted covariances + outer products (vectorized)
+         weights = mixing_probs[:, j]
+         # Weighted sum of covariances
+         P_mixed = np.tensordot(weights, covs_array, axes=([0], [0]))
+         # Add weighted outer products: sum_i w_i * outer(diff_i, diff_i)
+         weighted_diffs = np.sqrt(weights)[:, np.newaxis] * diffs
+         P_mixed += weighted_diffs.T @ weighted_diffs
+
+         mixed_states.append(x_mixed)
          mixed_covs.append(P_mixed)
 
      return mixed_states, mixed_covs
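The covariance mixing relies on the identity sum_i w_i d_i d_i^T = (sqrt(w) D)^T (sqrt(w) D), which holds for non-negative weights such as mixing probabilities. A minimal standalone check (names local to this example):

```python
import numpy as np

rng = np.random.default_rng(0)
w = rng.dirichlet(np.ones(4))        # non-negative weights summing to 1
D = rng.standard_normal((4, 6))      # rows are difference vectors d_i

loop = sum(w[i] * np.outer(D[i], D[i]) for i in range(4))
wd = np.sqrt(w)[:, np.newaxis] * D   # scale each row by sqrt(w_i)
assert np.allclose(loop, wd.T @ wd)
```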
@@ -215,19 +220,20 @@ def combine_estimates(
      P : ndarray
          Combined covariance.
      """
-     r = len(mode_states)
-     n = len(mode_states[0])
-
-     # Combined state: x = sum_j mu_j * x_j
-     x = np.zeros(n)
-     for j in range(r):
-         x += mode_probs[j] * mode_states[j]
-
-     # Combined covariance: P = sum_j mu_j * (P_j + (x_j - x)(x_j - x)^T)
-     P = np.zeros((n, n))
-     for j in range(r):
-         diff = mode_states[j] - x
-         P += mode_probs[j] * (mode_covs[j] + np.outer(diff, diff))
+     # Stack states and covariances for vectorized operations
+     states_array = np.array(mode_states)  # shape (r, n)
+     covs_array = np.array(mode_covs)  # shape (r, n, n)
+
+     # Combined state: x = sum_j mu_j * x_j (vectorized)
+     x = mode_probs @ states_array
+
+     # Combined covariance: P = sum_j mu_j * (P_j + (x_j - x)(x_j - x)^T) (vectorized)
+     diffs = states_array - x  # shape (r, n)
+     # Weighted sum of covariances
+     P = np.tensordot(mode_probs, covs_array, axes=([0], [0]))
+     # Add weighted outer products
+     weighted_diffs = np.sqrt(mode_probs)[:, np.newaxis] * diffs
+     P += weighted_diffs.T @ weighted_diffs
 
      # Ensure symmetry
      P = (P + P.T) / 2
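For reference, np.tensordot with axes=([0], [0]) contracts the mode axis, so it equals the explicit weighted sum of per-mode covariance matrices. A small standalone check with illustrative random inputs:

```python
import numpy as np

rng = np.random.default_rng(1)
mu = rng.dirichlet(np.ones(3))           # mode probabilities
A = rng.standard_normal((3, 4, 4))
covs = A @ A.transpose(0, 2, 1)          # one symmetric PSD matrix per mode

loop = sum(mu[j] * covs[j] for j in range(3))
assert np.allclose(np.tensordot(mu, covs, axes=([0], [0])), loop)
assert np.allclose(np.einsum('j,jkl->kl', mu, covs), loop)
```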
pytcl/dynamic_estimation/kalman/extended.py CHANGED
@@ -10,10 +10,7 @@ from typing import Callable
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
 
- from pytcl.dynamic_estimation.kalman.linear import (
-     KalmanPrediction,
-     KalmanUpdate,
- )
+ from pytcl.dynamic_estimation.kalman.linear import KalmanPrediction, KalmanUpdate
 
 
  def ekf_predict(
pytcl/dynamic_estimation/kalman/linear.py CHANGED
@@ -9,6 +9,7 @@ from typing import NamedTuple, Optional, Tuple
 
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
+ from scipy.linalg import cho_factor, cho_solve
 
 
  class KalmanState(NamedTuple):
@@ -204,9 +205,22 @@ def kf_update(
      # Innovation covariance
      S = H @ P @ H.T + R
 
-     # Kalman gain using solve for numerical stability
-     # K = P @ H' @ S^{-1}
-     K = np.linalg.solve(S.T, H @ P.T).T
+     # Use Cholesky decomposition for efficient solving (reused for gain and likelihood)
+     # This is more numerically stable and efficient than repeated solve() calls
+     try:
+         S_cho = cho_factor(S)
+         # Kalman gain: K = P @ H' @ S^{-1}
+         K = cho_solve(S_cho, H @ P.T).T
+         # Mahalanobis distance for likelihood
+         mahal_sq = y @ cho_solve(S_cho, y)
+         # Log determinant from Cholesky factor (more stable than det)
+         log_det_S = 2 * np.sum(np.log(np.diag(S_cho[0])))
+         m = len(z)
+         likelihood = np.exp(-0.5 * (mahal_sq + log_det_S + m * np.log(2 * np.pi)))
+     except np.linalg.LinAlgError:
+         # Fallback if Cholesky fails (S not positive definite)
+         K = np.linalg.solve(S.T, H @ P.T).T
+         likelihood = 0.0
 
      # Updated state
      x_upd = x + K @ y
@@ -218,16 +232,6 @@ def kf_update(
      # Ensure symmetry
      P_upd = (P_upd + P_upd.T) / 2
 
-     # Compute likelihood for data association
-     # p(z|x) = N(z; H@x, S)
-     m = len(z)
-     det_S = np.linalg.det(S)
-     if det_S > 0:
-         mahal_sq = y @ np.linalg.solve(S, y)
-         likelihood = np.exp(-0.5 * mahal_sq) / np.sqrt((2 * np.pi) ** m * det_S)
-     else:
-         likelihood = 0.0
-
      return KalmanUpdate(
          x=x_upd,
          P=P_upd,
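Together, the two hunks above move the likelihood into the Cholesky branch and evaluate it in the log domain, avoiding the overflow-prone explicit determinant of the removed code. A standalone sketch of that computation, with illustrative values for the innovation y and its covariance S:

```python
import numpy as np
from scipy.linalg import cho_factor, cho_solve

S = np.array([[2.0, 0.3],
              [0.3, 1.5]])              # innovation covariance (positive definite)
y = np.array([0.4, -0.2])               # innovation z - H @ x

S_cho = cho_factor(S)
mahal_sq = y @ cho_solve(S_cho, y)                   # y^T S^{-1} y
log_det_S = 2 * np.sum(np.log(np.diag(S_cho[0])))    # log|S| from the factor
m = len(y)
likelihood = np.exp(-0.5 * (mahal_sq + log_det_S + m * np.log(2 * np.pi)))

# Agrees with the direct determinant-based density the old code used
direct = np.exp(-0.5 * y @ np.linalg.solve(S, y)) / np.sqrt(
    (2 * np.pi) ** m * np.linalg.det(S)
)
assert np.isclose(likelihood, direct)
```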
pytcl/dynamic_estimation/kalman/unscented.py CHANGED
@@ -10,10 +10,7 @@ from typing import Callable, NamedTuple, Optional, Tuple
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
 
- from pytcl.dynamic_estimation.kalman.linear import (
-     KalmanPrediction,
-     KalmanUpdate,
- )
+ from pytcl.dynamic_estimation.kalman.linear import KalmanPrediction, KalmanUpdate
 
 
  class SigmaPoints(NamedTuple):
@@ -203,11 +200,19 @@ def unscented_transform(
      # Weighted mean
      mean = np.sum(Wm[:, np.newaxis] * sigmas, axis=0)
 
-     # Weighted covariance
+     # Weighted covariance (vectorized: avoids loop over sigma points)
      residuals = sigmas - mean
-     cov = np.zeros((sigmas.shape[1], sigmas.shape[1]), dtype=np.float64)
-     for i in range(len(sigmas)):
-         cov += Wc[i] * np.outer(residuals[i], residuals[i])
+     # Compute weighted outer products in one operation: (W * residuals)^T @ residuals
+     weighted_residuals = np.sqrt(np.abs(Wc))[:, np.newaxis] * residuals
+     # Handle negative weights (e.g., from Merwe scaling) by adjusting sign
+     cov = weighted_residuals.T @ weighted_residuals
+     # Correct for any negative weights (subtract their contribution twice to flip sign)
+     neg_mask = Wc < 0
+     if np.any(neg_mask):
+         neg_residuals = residuals[neg_mask]
+         neg_weights = -Wc[neg_mask]
+         for i, (w, r) in enumerate(zip(neg_weights, neg_residuals)):
+             cov -= 2 * w * np.outer(r, r)
 
      if noise_cov is not None:
          cov += np.asarray(noise_cov, dtype=np.float64)
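The sign correction works because building the covariance from sqrt(|Wc|) yields sum_i |Wc_i| r_i r_i^T, so each negative-weight term must be subtracted twice to recover the signed sum. A standalone check with one weight forced negative (as Merwe scaling can produce):

```python
import numpy as np

rng = np.random.default_rng(2)
residuals = rng.standard_normal((7, 3))
Wc = rng.standard_normal(7)
Wc[0] = -0.5                             # e.g., the Wc[0] of Merwe scaling

signed = sum(Wc[i] * np.outer(residuals[i], residuals[i]) for i in range(7))

wr = np.sqrt(np.abs(Wc))[:, np.newaxis] * residuals
cov = wr.T @ wr                          # uses |Wc|, so negative terms enter with + sign
for w, r in zip(-Wc[Wc < 0], residuals[Wc < 0]):
    cov -= 2 * w * np.outer(r, r)        # flip +|w| r r^T into -|w| r r^T

assert np.allclose(cov, signed)
```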
@@ -329,7 +334,6 @@ def ukf_update(
      z = np.asarray(z, dtype=np.float64).flatten()
      R = np.asarray(R, dtype=np.float64)
 
-     n = len(x)
      m = len(z)
 
      # Generate sigma points
@@ -341,10 +345,11 @@ def ukf_update(
      # Predicted measurement mean and covariance
      z_pred, S = unscented_transform(sigmas_h, sp.Wm, sp.Wc, R)
 
-     # Cross-covariance between state and measurement
-     Pxz = np.zeros((n, m), dtype=np.float64)
-     for i in range(len(sp.points)):
-         Pxz += sp.Wc[i] * np.outer(sp.points[i] - x, sigmas_h[i] - z_pred)
+     # Cross-covariance between state and measurement (vectorized)
+     x_residuals = sp.points - x
+     z_residuals = sigmas_h - z_pred
+     # Weighted cross-covariance: sum of Wc[i] * outer(x_res[i], z_res[i])
+     Pxz = (sp.Wc[:, np.newaxis] * x_residuals).T @ z_residuals
 
      # Kalman gain
      K = np.linalg.solve(S.T, Pxz.T).T
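Cross-covariances can be vectorized by applying the weights to one side only, which, unlike the sqrt trick, handles signed weights directly. A standalone check with illustrative shapes:

```python
import numpy as np

rng = np.random.default_rng(3)
Wc = rng.standard_normal(9)              # signed sigma-point weights are fine here
X = rng.standard_normal((9, 4))          # state-space residuals
Z = rng.standard_normal((9, 2))          # measurement-space residuals

loop = sum(Wc[i] * np.outer(X[i], Z[i]) for i in range(9))
assert np.allclose((Wc[:, np.newaxis] * X).T @ Z, loop)
```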
@@ -467,12 +472,11 @@ def ckf_predict(
      # Predicted mean
      x_pred = np.sum(weights[:, np.newaxis] * transformed, axis=0)
 
-     # Predicted covariance
+     # Predicted covariance (vectorized)
      residuals = transformed - x_pred
-     P_pred = np.zeros((n, n), dtype=np.float64)
-     for i in range(len(cubature_pts)):
-         P_pred += weights[i] * np.outer(residuals[i], residuals[i])
-     P_pred += Q
+     # All CKF weights are equal and positive, so vectorization is straightforward
+     weighted_residuals = np.sqrt(weights)[:, np.newaxis] * residuals
+     P_pred = weighted_residuals.T @ weighted_residuals + Q
 
      P_pred = (P_pred + P_pred.T) / 2
 
@@ -533,18 +537,14 @@ def ckf_update(
      # Predicted measurement
      z_pred = np.sum(weights[:, np.newaxis] * transformed, axis=0)
 
-     # Innovation covariance
+     # Innovation covariance (vectorized)
      z_residuals = transformed - z_pred
-     S = np.zeros((m, m), dtype=np.float64)
-     for i in range(len(cubature_pts)):
-         S += weights[i] * np.outer(z_residuals[i], z_residuals[i])
-     S += R
+     weighted_z_residuals = np.sqrt(weights)[:, np.newaxis] * z_residuals
+     S = weighted_z_residuals.T @ weighted_z_residuals + R
 
-     # Cross-covariance
+     # Cross-covariance (vectorized)
      x_residuals = cubature_pts - x
-     Pxz = np.zeros((n, m), dtype=np.float64)
-     for i in range(len(cubature_pts)):
-         Pxz += weights[i] * np.outer(x_residuals[i], z_residuals[i])
+     Pxz = (weights[:, np.newaxis] * x_residuals).T @ z_residuals
 
      # Kalman gain
      K = np.linalg.solve(S.T, Pxz.T).T
pytcl/dynamic_estimation/particle_filters/bootstrap.py CHANGED
@@ -8,6 +8,7 @@ state estimation.
  from typing import Callable, NamedTuple, Optional, Tuple
 
  import numpy as np
+ from numba import njit
  from numpy.typing import ArrayLike, NDArray
 
 
@@ -99,6 +100,27 @@
      return particles[indices].copy()
 
 
+ @njit(cache=True)
+ def _resample_residual_deterministic(
+     particles: np.ndarray,
+     floor_Nw: np.ndarray,
+ ) -> Tuple[np.ndarray, int]:
+     """JIT-compiled deterministic copy portion of residual resampling."""
+     N = particles.shape[0]
+     n = particles.shape[1]
+     resampled = np.zeros((N, n), dtype=np.float64)
+
+     idx = 0
+     for i in range(N):
+         count = floor_Nw[i]
+         for _ in range(count):
+             for k in range(n):
+                 resampled[idx, k] = particles[i, k]
+             idx += 1
+
+     return resampled, idx
+
+
  def resample_residual(
      particles: NDArray[np.floating],
      weights: NDArray[np.floating],
@@ -128,24 +150,16 @@
      rng = np.random.default_rng()
 
      N = len(weights)
-     n = particles.shape[1]
 
      # Integer and fractional parts
      Nw = N * weights
-     floor_Nw = np.floor(Nw).astype(int)
+     floor_Nw = np.floor(Nw).astype(np.int64)
      residual = Nw - floor_Nw
 
-     # Number of deterministic copies (used implicitly via floor_Nw loop)
-
-     # Allocate output
-     resampled = np.zeros((N, n), dtype=np.float64)
-
-     # Deterministic copies
-     idx = 0
-     for i in range(N):
-         for _ in range(floor_Nw[i]):
-             resampled[idx] = particles[i]
-             idx += 1
+     # Deterministic copies (JIT-compiled)
+     resampled, idx = _resample_residual_deterministic(
+         particles.astype(np.float64), floor_Nw
+     )
 
      # Multinomial resampling of residuals
      if idx < N:
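For reference, the deterministic-copy stage that the numba kernel implements is equivalent to np.repeat over the floor counts. A small standalone sketch (names local to this example):

```python
import numpy as np

rng = np.random.default_rng(4)
particles = rng.standard_normal((5, 2))
weights = rng.dirichlet(np.ones(5))

N = len(weights)
floor_Nw = np.floor(N * weights).astype(np.int64)

# Plain-NumPy equivalent of the copy loop: repeat row i floor_Nw[i] times
deterministic = np.repeat(particles, floor_Nw, axis=0)
assert deterministic.shape[0] == int(floor_Nw.sum())  # the idx in the code above
```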
@@ -410,6 +424,31 @@ def particle_mean(
      return np.sum(weights[:, np.newaxis] * particles, axis=0)
 
 
+ @njit(cache=True)
+ def _particle_covariance_core(
+     particles: np.ndarray,
+     weights: np.ndarray,
+     mean: np.ndarray,
+ ) -> np.ndarray:
+     """JIT-compiled core for particle covariance computation."""
+     N = particles.shape[0]
+     n = particles.shape[1]
+     cov = np.zeros((n, n), dtype=np.float64)
+
+     for i in range(N):
+         w = weights[i]
+         for j in range(n):
+             diff_j = particles[i, j] - mean[j]
+             for k in range(j, n):
+                 diff_k = particles[i, k] - mean[k]
+                 val = w * diff_j * diff_k
+                 cov[j, k] += val
+                 if j != k:
+                     cov[k, j] += val
+
+     return cov
+
+
  def particle_covariance(
      particles: NDArray[np.floating],
      weights: NDArray[np.floating],
@@ -435,12 +474,11 @@ def particle_covariance(
      if mean is None:
          mean = particle_mean(particles, weights)
 
-     residuals = particles - mean
-     cov = np.zeros((particles.shape[1], particles.shape[1]), dtype=np.float64)
-     for i in range(len(particles)):
-         cov += weights[i] * np.outer(residuals[i], residuals[i])
-
-     return cov
+     return _particle_covariance_core(
+         particles.astype(np.float64),
+         weights.astype(np.float64),
+         mean.astype(np.float64),
+     )
 
 
  def initialize_particles(
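The JIT kernel computes the standard weighted particle covariance, sum_i w_i (p_i - mean)(p_i - mean)^T, exploiting symmetry to fill both triangles. A pure-NumPy reference for the same quantity:

```python
import numpy as np

rng = np.random.default_rng(5)
particles = rng.standard_normal((100, 3))
weights = rng.dirichlet(np.ones(100))

mean = weights @ particles
residuals = particles - mean
cov = residuals.T @ (weights[:, np.newaxis] * residuals)  # sum_i w_i d_i d_i^T

loop = sum(w * np.outer(d, d) for w, d in zip(weights, residuals))
assert np.allclose(cov, loop)
```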
pytcl/dynamic_estimation/smoothers.py CHANGED
@@ -17,11 +17,7 @@ from typing import List, NamedTuple, Optional
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
 
- from pytcl.dynamic_estimation.kalman.linear import (
-     kf_predict,
-     kf_smooth,
-     kf_update,
- )
+ from pytcl.dynamic_estimation.kalman.linear import kf_predict, kf_smooth, kf_update
 
 
  class SmoothedState(NamedTuple):
pytcl/dynamic_models/discrete_time/__init__.py CHANGED
@@ -17,11 +17,7 @@ from pytcl.dynamic_models.discrete_time.polynomial import (
      f_piecewise_white_noise_jerk,
      f_poly_kal,
  )
- from pytcl.dynamic_models.discrete_time.singer import (
-     f_singer,
-     f_singer_2d,
-     f_singer_3d,
- )
+ from pytcl.dynamic_models.discrete_time.singer import f_singer, f_singer_2d, f_singer_3d
 
  __all__ = [
      # Polynomial models
pytcl/dynamic_models/process_noise/__init__.py CHANGED
@@ -17,11 +17,7 @@ from pytcl.dynamic_models.process_noise.polynomial import (
      q_discrete_white_noise,
      q_poly_kal,
  )
- from pytcl.dynamic_models.process_noise.singer import (
-     q_singer,
-     q_singer_2d,
-     q_singer_3d,
- )
+ from pytcl.dynamic_models.process_noise.singer import q_singer, q_singer_2d, q_singer_3d
 
  __all__ = [
      # Polynomial models
pytcl/magnetism/__init__.py CHANGED
@@ -25,22 +25,11 @@ Examples
  >>> coef = create_emm_test_coefficients(n_max=36)
  """
 
- from pytcl.magnetism.emm import (
-     EMM_PARAMETERS,
-     HighResCoefficients,
- )
+ from pytcl.magnetism.emm import EMM_PARAMETERS, HighResCoefficients
  from pytcl.magnetism.emm import create_test_coefficients as create_emm_test_coefficients
- from pytcl.magnetism.emm import (
-     emm,
-     emm_declination,
-     emm_inclination,
-     emm_intensity,
- )
+ from pytcl.magnetism.emm import emm, emm_declination, emm_inclination, emm_intensity
  from pytcl.magnetism.emm import get_data_dir as get_emm_data_dir
- from pytcl.magnetism.emm import (
-     load_emm_coefficients,
-     wmmhr,
- )
+ from pytcl.magnetism.emm import load_emm_coefficients, wmmhr
  from pytcl.magnetism.igrf import (
      IGRF13,
      IGRFModel,
pytcl/mathematical_functions/interpolation/__init__.py CHANGED
@@ -8,8 +8,8 @@ This module provides:
  - Spherical interpolation
  """
 
- from pytcl.mathematical_functions.interpolation.interpolation import (  # noqa: E501
-     akima,
+ from pytcl.mathematical_functions.interpolation.interpolation import akima  # noqa: E501
+ from pytcl.mathematical_functions.interpolation.interpolation import (
      barycentric,
      cubic_spline,
      interp1d,
pytcl/mathematical_functions/special_functions/__init__.py CHANGED
@@ -40,8 +40,8 @@ from pytcl.mathematical_functions.special_functions.debye import (
      debye_entropy,
      debye_heat_capacity,
  )
- from pytcl.mathematical_functions.special_functions.elliptic import (  # noqa: E501
-     ellipe,
+ from pytcl.mathematical_functions.special_functions.elliptic import ellipe  # noqa: E501
+ from pytcl.mathematical_functions.special_functions.elliptic import (
      ellipeinc,
      ellipk,
      ellipkinc,
pytcl/navigation/__init__.py CHANGED
@@ -11,8 +11,10 @@ needed in tracking applications, including:
  - Rhumb line navigation
  """
 
- from pytcl.navigation.geodesy import (  # Ellipsoids; Coordinate conversions; Geodetic problems
-     GRS80,
+ from pytcl.navigation.geodesy import (
+     GRS80,  # Ellipsoids; Coordinate conversions; Geodetic problems
+ )
+ from pytcl.navigation.geodesy import (
      SPHERE,
      WGS84,
      Ellipsoid,
@@ -26,8 +28,8 @@ from pytcl.navigation.geodesy import ( # Ellipsoids; Coordinate conversions; Ge
      inverse_geodetic,
      ned_to_ecef,
  )
- from pytcl.navigation.great_circle import (  # Great circle navigation
-     EARTH_RADIUS,
+ from pytcl.navigation.great_circle import EARTH_RADIUS  # Great circle navigation
+ from pytcl.navigation.great_circle import (
      CrossTrackResult,
      GreatCircleResult,
      IntersectionResult,
@@ -45,8 +47,10 @@ from pytcl.navigation.great_circle import ( # Great circle navigation
      great_circle_waypoint,
      great_circle_waypoints,
  )
- from pytcl.navigation.ins import (  # Constants; State representation; Gravity and Earth rate
-     A_EARTH,
+ from pytcl.navigation.ins import (
+     A_EARTH,  # Constants; State representation; Gravity and Earth rate
+ )
+ from pytcl.navigation.ins import (
      B_EARTH,
      E2_EARTH,
      F_EARTH,
@@ -73,8 +77,8 @@ from pytcl.navigation.ins import ( # Constants; State representation; Gravity a
      update_attitude_ned,
      update_quaternion,
  )
- from pytcl.navigation.ins_gnss import (  # INS/GNSS integration
-     GPS_L1_FREQ,
+ from pytcl.navigation.ins_gnss import GPS_L1_FREQ  # INS/GNSS integration
+ from pytcl.navigation.ins_gnss import (
      GPS_L1_WAVELENGTH,
      SPEED_OF_LIGHT,
      GNSSMeasurement,
@@ -99,8 +103,8 @@ from pytcl.navigation.ins_gnss import ( # INS/GNSS integration
      tight_coupled_update,
      velocity_measurement_matrix,
  )
- from pytcl.navigation.rhumb import (  # Rhumb line navigation
-     RhumbDirectResult,
+ from pytcl.navigation.rhumb import RhumbDirectResult  # Rhumb line navigation
+ from pytcl.navigation.rhumb import (
      RhumbIntersectionResult,
      RhumbResult,
      compare_great_circle_rhumb,
pytcl/navigation/ins.py CHANGED
@@ -23,11 +23,7 @@ from typing import NamedTuple, Optional, Tuple
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
 
- from pytcl.coordinate_systems.rotations import (
-     quat2rotmat,
-     quat_multiply,
-     rotmat2quat,
- )
+ from pytcl.coordinate_systems.rotations import quat2rotmat, quat_multiply, rotmat2quat
  from pytcl.navigation.geodesy import WGS84, Ellipsoid
 
  # =============================================================================
pytcl/trackers/__init__.py CHANGED
@@ -16,20 +16,9 @@ from pytcl.trackers.hypothesis import (
      n_scan_prune,
      prune_hypotheses_by_probability,
  )
- from pytcl.trackers.mht import (
-     MHTConfig,
-     MHTResult,
-     MHTTracker,
- )
- from pytcl.trackers.multi_target import (
-     MultiTargetTracker,
-     Track,
-     TrackStatus,
- )
- from pytcl.trackers.single_target import (
-     SingleTargetTracker,
-     TrackState,
- )
+ from pytcl.trackers.mht import MHTConfig, MHTResult, MHTTracker
+ from pytcl.trackers.multi_target import MultiTargetTracker, Track, TrackStatus
+ from pytcl.trackers.single_target import SingleTargetTracker, TrackState
 
  __all__ = [
      # Single target
pytcl/trackers/multi_target.py CHANGED
@@ -11,10 +11,7 @@ from typing import Callable, List, NamedTuple, Optional
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
 
- from pytcl.assignment_algorithms import (
-     chi2_gate_threshold,
-     gnn_association,
- )
+ from pytcl.assignment_algorithms import chi2_gate_threshold, gnn_association
 
 
  class TrackStatus(Enum):