nrl-tracker 1.7.0__py3-none-any.whl → 1.7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/METADATA +43 -3
  2. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/RECORD +76 -76
  3. pytcl/__init__.py +2 -2
  4. pytcl/assignment_algorithms/__init__.py +15 -15
  5. pytcl/assignment_algorithms/gating.py +10 -10
  6. pytcl/assignment_algorithms/jpda.py +40 -40
  7. pytcl/assignment_algorithms/nd_assignment.py +5 -4
  8. pytcl/assignment_algorithms/network_flow.py +18 -8
  9. pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
  10. pytcl/astronomical/__init__.py +9 -9
  11. pytcl/astronomical/ephemerides.py +14 -11
  12. pytcl/astronomical/reference_frames.py +8 -4
  13. pytcl/astronomical/relativity.py +6 -5
  14. pytcl/astronomical/special_orbits.py +9 -13
  15. pytcl/atmosphere/__init__.py +6 -6
  16. pytcl/atmosphere/nrlmsise00.py +153 -152
  17. pytcl/clustering/dbscan.py +2 -2
  18. pytcl/clustering/gaussian_mixture.py +3 -3
  19. pytcl/clustering/hierarchical.py +15 -15
  20. pytcl/clustering/kmeans.py +4 -4
  21. pytcl/containers/base.py +3 -3
  22. pytcl/containers/cluster_set.py +12 -2
  23. pytcl/containers/covertree.py +5 -3
  24. pytcl/containers/rtree.py +1 -1
  25. pytcl/containers/vptree.py +4 -2
  26. pytcl/coordinate_systems/conversions/geodetic.py +31 -7
  27. pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
  28. pytcl/coordinate_systems/projections/__init__.py +1 -1
  29. pytcl/coordinate_systems/projections/projections.py +2 -2
  30. pytcl/coordinate_systems/rotations/rotations.py +10 -6
  31. pytcl/core/validation.py +3 -3
  32. pytcl/dynamic_estimation/__init__.py +16 -16
  33. pytcl/dynamic_estimation/gaussian_sum_filter.py +20 -38
  34. pytcl/dynamic_estimation/imm.py +14 -14
  35. pytcl/dynamic_estimation/kalman/__init__.py +1 -1
  36. pytcl/dynamic_estimation/kalman/constrained.py +35 -23
  37. pytcl/dynamic_estimation/kalman/extended.py +8 -8
  38. pytcl/dynamic_estimation/kalman/h_infinity.py +2 -2
  39. pytcl/dynamic_estimation/kalman/square_root.py +8 -2
  40. pytcl/dynamic_estimation/kalman/sr_ukf.py +3 -3
  41. pytcl/dynamic_estimation/kalman/ud_filter.py +11 -5
  42. pytcl/dynamic_estimation/kalman/unscented.py +8 -6
  43. pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
  44. pytcl/dynamic_estimation/rbpf.py +36 -40
  45. pytcl/gravity/spherical_harmonics.py +3 -3
  46. pytcl/gravity/tides.py +6 -6
  47. pytcl/logging_config.py +3 -3
  48. pytcl/magnetism/emm.py +10 -3
  49. pytcl/magnetism/wmm.py +4 -4
  50. pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
  51. pytcl/mathematical_functions/geometry/geometry.py +5 -5
  52. pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
  53. pytcl/mathematical_functions/signal_processing/detection.py +24 -24
  54. pytcl/mathematical_functions/signal_processing/filters.py +14 -14
  55. pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
  56. pytcl/mathematical_functions/special_functions/bessel.py +15 -3
  57. pytcl/mathematical_functions/special_functions/debye.py +5 -1
  58. pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
  59. pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
  60. pytcl/mathematical_functions/special_functions/hypergeometric.py +6 -4
  61. pytcl/mathematical_functions/transforms/fourier.py +8 -8
  62. pytcl/mathematical_functions/transforms/stft.py +12 -12
  63. pytcl/mathematical_functions/transforms/wavelets.py +9 -9
  64. pytcl/navigation/geodesy.py +3 -3
  65. pytcl/navigation/great_circle.py +5 -5
  66. pytcl/plotting/coordinates.py +7 -7
  67. pytcl/plotting/tracks.py +2 -2
  68. pytcl/static_estimation/maximum_likelihood.py +16 -14
  69. pytcl/static_estimation/robust.py +5 -5
  70. pytcl/terrain/loaders.py +5 -5
  71. pytcl/trackers/hypothesis.py +1 -1
  72. pytcl/trackers/mht.py +9 -9
  73. pytcl/trackers/multi_target.py +1 -1
  74. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/LICENSE +0 -0
  75. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/WHEEL +0 -0
  76. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/top_level.txt +0 -0
pytcl/clustering/dbscan.py CHANGED
@@ -12,7 +12,7 @@ References
      with Noise," KDD 1996.
  """
 
- from typing import List, NamedTuple, Set
+ from typing import Any, List, NamedTuple, Set
 
  import numpy as np
  from numba import njit
@@ -42,7 +42,7 @@ class DBSCANResult(NamedTuple):
 
 
  @njit(cache=True)
- def _compute_distance_matrix(X: np.ndarray) -> np.ndarray:
+ def _compute_distance_matrix(X: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
      """Compute pairwise Euclidean distance matrix (JIT-compiled)."""
      n = X.shape[0]
      dist = np.zeros((n, n), dtype=np.float64)
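Note on the annotation style these hunks introduce (illustrative aside, not part of the diff): np.ndarray is generic over shape and dtype, so np.ndarray[Any, Any] is the fully unparameterized spelling accepted by strict type checkers, while numpy.typing.NDArray[np.float64] pins only the dtype. A minimal runnable sketch, with an invented helper name pairwise_sq_dists, assuming NumPy 1.22+ so that np.ndarray[...] is subscriptable at runtime:

    from typing import Any

    import numpy as np
    from numpy.typing import NDArray


    def pairwise_sq_dists(X: np.ndarray[Any, Any]) -> NDArray[np.float64]:
        """Squared Euclidean distances between rows of X (shape n x d)."""
        diff = X[:, None, :] - X[None, :, :]         # (n, n, d) pairwise differences
        return np.einsum("ijk,ijk->ij", diff, diff)  # sum of squares over features


    print(pairwise_sq_dists(np.array([[0.0, 0.0], [3.0, 4.0]])))  # [[ 0. 25.] [25.  0.]]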
pytcl/clustering/gaussian_mixture.py CHANGED
@@ -674,9 +674,9 @@ class GaussianMixture:
 
      def _gaussian_pdf(
          self,
-         x: NDArray,
-         mean: NDArray,
-         cov: NDArray,
+         x: NDArray[np.floating],
+         mean: NDArray[np.floating],
+         cov: NDArray[np.floating],
      ) -> float:
          """Evaluate single Gaussian PDF."""
          n = len(x)
pytcl/clustering/hierarchical.py CHANGED
@@ -12,7 +12,7 @@ References
  """
 
  from enum import Enum
- from typing import List, Literal, NamedTuple, Optional
+ from typing import Any, List, Literal, NamedTuple, Optional
 
  import numpy as np
  from numba import njit
@@ -72,7 +72,7 @@ class HierarchicalResult(NamedTuple):
 
 
  @njit(cache=True)
- def _compute_distance_matrix_jit(X: np.ndarray) -> np.ndarray:
+ def _compute_distance_matrix_jit(X: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
      """JIT-compiled pairwise Euclidean distance computation."""
      n = X.shape[0]
      n_features = X.shape[1]
@@ -112,43 +112,43 @@ def compute_distance_matrix(
 
 
  def _single_linkage(
-     dist_i: NDArray,
-     dist_j: NDArray,
+     dist_i: NDArray[Any],
+     dist_j: NDArray[Any],
      size_i: int,
      size_j: int,
- ) -> NDArray:
+ ) -> NDArray[Any]:
      """Single linkage: minimum of distances."""
      return np.minimum(dist_i, dist_j)
 
 
  def _complete_linkage(
-     dist_i: NDArray,
-     dist_j: NDArray,
+     dist_i: NDArray[Any],
+     dist_j: NDArray[Any],
      size_i: int,
      size_j: int,
- ) -> NDArray:
+ ) -> NDArray[Any]:
      """Complete linkage: maximum of distances."""
      return np.maximum(dist_i, dist_j)
 
 
  def _average_linkage(
-     dist_i: NDArray,
-     dist_j: NDArray,
+     dist_i: NDArray[Any],
+     dist_j: NDArray[Any],
      size_i: int,
      size_j: int,
- ) -> NDArray:
+ ) -> NDArray[Any]:
      """Average linkage: weighted average of distances."""
      return (size_i * dist_i + size_j * dist_j) / (size_i + size_j)
 
 
  def _ward_linkage(
-     dist_i: NDArray,
-     dist_j: NDArray,
+     dist_i: NDArray[Any],
+     dist_j: NDArray[Any],
      size_i: int,
      size_j: int,
-     size_k: NDArray,
+     size_k: NDArray[Any],
      dist_ij: float,
- ) -> NDArray:
+ ) -> NDArray[Any]:
      """Ward's linkage: minimum variance merge."""
      total = size_i + size_j + size_k
      return np.sqrt(
pytcl/clustering/kmeans.py CHANGED
@@ -10,7 +10,7 @@ References
      Careful Seeding," SODA 2007.
  """
 
- from typing import Literal, NamedTuple, Optional, Union
+ from typing import Any, Literal, NamedTuple, Optional, Union
 
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
@@ -305,7 +305,7 @@ def _kmeans_single(
 
      # Handle empty clusters: keep old center
      for k in range(n_clusters):
-         if np.all(new_centers[k] == 0) and np.any(labels == k) is False:
+         if np.all(new_centers[k] == 0) and not np.any(labels == k):
              new_centers[k] = centers[k]
 
      # Check convergence
@@ -336,8 +336,8 @@ def _kmeans_single(
  def kmeans_elbow(
      X: ArrayLike,
      k_range: Optional[range] = None,
-     **kwargs,
- ) -> dict:
+     **kwargs: Any,
+ ) -> dict[str, Any]:
      """
      Compute K-means for a range of k values for elbow method.
 
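The _kmeans_single hunk above is the one behavioral fix in this group: np.any(...) returns a numpy.bool_, not the built-in False singleton, so the old identity check "... is False" could never be true. A standalone illustration of the pitfall (not nrl-tracker code):

    import numpy as np

    labels = np.array([0, 0, 1])
    empty = np.any(labels == 2)   # numpy.bool_(False), not the builtin False
    print(empty is False)         # False -- identity test against builtin False fails
    print(not empty)              # True  -- the corrected form used in the diff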
pytcl/containers/base.py CHANGED
@@ -8,7 +8,7 @@ Cover trees.
 
  import logging
  from abc import ABC, abstractmethod
- from typing import Callable, List, NamedTuple, Optional
+ from typing import Any, Callable, List, NamedTuple, Optional
 
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
@@ -155,7 +155,7 @@ class MetricSpatialIndex(BaseSpatialIndex):
      def __init__(
          self,
          data: ArrayLike,
-         metric: Optional[Callable[[NDArray, NDArray], float]] = None,
+         metric: Optional[Callable[[NDArray[Any], NDArray[Any]], float]] = None,
      ):
          super().__init__(data)
 
@@ -165,7 +165,7 @@ class MetricSpatialIndex(BaseSpatialIndex):
          self.metric = metric
 
      @staticmethod
-     def _euclidean_distance(x: NDArray, y: NDArray) -> float:
+     def _euclidean_distance(x: NDArray[Any], y: NDArray[Any]) -> float:
          """Default Euclidean distance metric."""
          return float(np.sqrt(np.sum((x - y) ** 2)))
 
pytcl/containers/cluster_set.py CHANGED
@@ -7,7 +7,17 @@ that move together (formations, convoys, etc.).
 
  from __future__ import annotations
 
- from typing import Dict, Iterable, Iterator, List, NamedTuple, Optional, Tuple, Union
+ from typing import (
+     Any,
+     Dict,
+     Iterable,
+     Iterator,
+     List,
+     NamedTuple,
+     Optional,
+     Tuple,
+     Union,
+ )
 
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
@@ -303,7 +313,7 @@ class ClusterSet:
          cls,
          tracks: TrackList,
          method: str = "dbscan",
-         **kwargs,
+         **kwargs: Any,
      ) -> ClusterSet:
          """
          Create a ClusterSet by clustering tracks.
pytcl/containers/covertree.py CHANGED
@@ -12,7 +12,7 @@ References
  """
 
  import logging
- from typing import Callable, List, NamedTuple, Optional, Set, Tuple
+ from typing import Any, Callable, List, NamedTuple, Optional, Set, Tuple
 
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
@@ -109,7 +109,9 @@ class CoverTree(MetricSpatialIndex):
      def __init__(
          self,
          data: ArrayLike,
-         metric: Optional[Callable[[NDArray, NDArray], float]] = None,
+         metric: Optional[
+             Callable[[np.ndarray[Any, Any], np.ndarray[Any, Any]], float]
+         ] = None,
          base: float = 2.0,
      ):
          super().__init__(data, metric)
@@ -141,7 +143,7 @@
          self._distance_cache[key] = self.metric(self.data[i], self.data[j])
          return self._distance_cache[key]
 
-     def _distance_to_point(self, idx: int, query: NDArray) -> float:
+     def _distance_to_point(self, idx: int, query: NDArray[np.floating]) -> float:
          """Distance from data point to query point."""
          return self.metric(self.data[idx], query)
 
pytcl/containers/rtree.py CHANGED
@@ -654,7 +654,7 @@ class RTree:
          query = np.asarray(query_point, dtype=np.float64)
          neighbors: List[Tuple[float, int]] = []
 
-         def min_dist_to_box(point: NDArray, bbox: BoundingBox) -> float:
+         def min_dist_to_box(point: NDArray[np.floating], bbox: BoundingBox) -> float:
              """Minimum distance from point to bounding box."""
              clamped = np.clip(point, bbox.min_coords, bbox.max_coords)
              return float(np.sqrt(np.sum((point - clamped) ** 2)))
pytcl/containers/vptree.py CHANGED
@@ -12,7 +12,7 @@ References
  """
 
  import logging
- from typing import Callable, List, NamedTuple, Optional, Tuple
+ from typing import Any, Callable, List, NamedTuple, Optional, Tuple
 
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
@@ -105,7 +105,9 @@ class VPTree(MetricSpatialIndex):
      def __init__(
          self,
          data: ArrayLike,
-         metric: Optional[Callable[[NDArray, NDArray], float]] = None,
+         metric: Optional[
+             Callable[[np.ndarray[Any, Any], np.ndarray[Any, Any]], float]
+         ] = None,
      ):
          super().__init__(data, metric)
 
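The CoverTree and VPTree constructors above now spell out the metric callback type. As an illustrative aside (the function below is invented for the example, not part of the package), any callable matching that (ndarray, ndarray) -> float shape can serve as a custom metric:

    from typing import Any

    import numpy as np


    def manhattan(x: np.ndarray[Any, Any], y: np.ndarray[Any, Any]) -> float:
        """L1 distance with the callback shape the annotations describe."""
        return float(np.sum(np.abs(x - y)))


    print(manhattan(np.array([0.0, 0.0]), np.array([3.0, 4.0])))  # 7.0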
pytcl/coordinate_systems/conversions/geodetic.py CHANGED
@@ -166,7 +166,7 @@ def ecef2geodetic(
 
      # Altitude
      # Use cos_lat when available, otherwise use sin_lat with guard against division by zero
-     with np.errstate(divide='ignore', invalid='ignore'):
+     with np.errstate(divide="ignore", invalid="ignore"):
          alt = np.where(
              np.abs(cos_lat) > 1e-10,
              p / cos_lat - N,
@@ -651,14 +651,30 @@ def ecef2sez(
      # Z = cos(lat)*cos(lon)*dX + cos(lat)*sin(lon)*dY + sin(lat)*dZ
 
      if delta_ecef.ndim == 1:
-         s = -sin_lat * cos_lon * delta_ecef[0] - sin_lat * sin_lon * delta_ecef[1] + cos_lat * delta_ecef[2]
+         s = (
+             -sin_lat * cos_lon * delta_ecef[0]
+             - sin_lat * sin_lon * delta_ecef[1]
+             + cos_lat * delta_ecef[2]
+         )
          e = -sin_lon * delta_ecef[0] + cos_lon * delta_ecef[1]
-         z = cos_lat * cos_lon * delta_ecef[0] + cos_lat * sin_lon * delta_ecef[1] + sin_lat * delta_ecef[2]
+         z = (
+             cos_lat * cos_lon * delta_ecef[0]
+             + cos_lat * sin_lon * delta_ecef[1]
+             + sin_lat * delta_ecef[2]
+         )
          return np.array([s, e, z], dtype=np.float64)
      else:
-         s = -sin_lat * cos_lon * delta_ecef[0, :] - sin_lat * sin_lon * delta_ecef[1, :] + cos_lat * delta_ecef[2, :]
+         s = (
+             -sin_lat * cos_lon * delta_ecef[0, :]
+             - sin_lat * sin_lon * delta_ecef[1, :]
+             + cos_lat * delta_ecef[2, :]
+         )
          e = -sin_lon * delta_ecef[0, :] + cos_lon * delta_ecef[1, :]
-         z = cos_lat * cos_lon * delta_ecef[0, :] + cos_lat * sin_lon * delta_ecef[1, :] + sin_lat * delta_ecef[2, :]
+         z = (
+             cos_lat * cos_lon * delta_ecef[0, :]
+             + cos_lat * sin_lon * delta_ecef[1, :]
+             + sin_lat * delta_ecef[2, :]
+         )
          return np.array([s, e, z], dtype=np.float64)
 
 
@@ -714,8 +730,16 @@ def sez2ecef(
      else:
          if sez.shape[0] != 3:
              sez = sez.T
-         dX = -sin_lat * cos_lon * sez[0, :] - sin_lon * sez[1, :] + cos_lat * cos_lon * sez[2, :]
-         dY = -sin_lat * sin_lon * sez[0, :] + cos_lon * sez[1, :] + cos_lat * sin_lon * sez[2, :]
+         dX = (
+             -sin_lat * cos_lon * sez[0, :]
+             - sin_lon * sez[1, :]
+             + cos_lat * cos_lon * sez[2, :]
+         )
+         dY = (
+             -sin_lat * sin_lon * sez[0, :]
+             + cos_lon * sez[1, :]
+             + cos_lat * sin_lon * sez[2, :]
+         )
          dZ = cos_lat * sez[0, :] + sin_lat * sez[2, :]
          return ecef_ref[:, np.newaxis] + np.array([dX, dY, dZ], dtype=np.float64)
 
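The ecef2sez/sez2ecef hunks only re-wrap long lines; the arithmetic is unchanged. For orientation, the same ECEF-to-SEZ rotation can be written as one 3x3 matrix product. This is an illustrative sketch using the diff's variable naming, not the package implementation:

    import numpy as np


    def ecef_delta_to_sez(delta_ecef: np.ndarray, lat: float, lon: float) -> np.ndarray:
        """Rotate an ECEF offset into South-East-Zenith components (angles in radians)."""
        sin_lat, cos_lat = np.sin(lat), np.cos(lat)
        sin_lon, cos_lon = np.sin(lon), np.cos(lon)
        rot = np.array(
            [
                [-sin_lat * cos_lon, -sin_lat * sin_lon, cos_lat],  # South row
                [-sin_lon, cos_lon, 0.0],                           # East row
                [cos_lat * cos_lon, cos_lat * sin_lon, sin_lat],    # Zenith row
            ]
        )
        return rot @ delta_ecef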
pytcl/coordinate_systems/jacobians/jacobians.py CHANGED
@@ -6,7 +6,7 @@ coordinate transformations, essential for error propagation in tracking
  filters (e.g., converting measurement covariances between coordinate systems).
  """
 
- from typing import Literal
+ from typing import Callable, Literal
 
  import numpy as np
  from numpy.typing import ArrayLike, NDArray
@@ -431,7 +431,7 @@ def cross_covariance_transform(
 
 
  def numerical_jacobian(
-     func,
+     func: Callable[[ArrayLike], ArrayLike],
      x: ArrayLike,
      dx: float = 1e-7,
  ) -> NDArray[np.floating]:
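The numerical_jacobian hunk types the previously untyped func parameter; the function body is not shown in this diff. A generic forward-difference sketch with the same signature shape, for orientation only (not the nrl-tracker implementation):

    from typing import Callable

    import numpy as np
    from numpy.typing import ArrayLike, NDArray


    def finite_difference_jacobian(
        func: Callable[[ArrayLike], ArrayLike],
        x: ArrayLike,
        dx: float = 1e-7,
    ) -> NDArray[np.floating]:
        x0 = np.asarray(x, dtype=np.float64)
        f0 = np.asarray(func(x0), dtype=np.float64)
        jac = np.zeros((f0.size, x0.size))
        for j in range(x0.size):
            xp = x0.copy()
            xp[j] += dx  # perturb one input component at a time
            jac[:, j] = (np.asarray(func(xp), dtype=np.float64) - f0) / dx
        return jac


    print(finite_difference_jacobian(lambda v: np.array([v[0] ** 2, v[0] * v[1]]), [2.0, 3.0]))
    # approximately [[4. 0.] [3. 2.]]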
pytcl/coordinate_systems/projections/__init__.py CHANGED
@@ -27,7 +27,7 @@ Examples
  """
 
  from pytcl.coordinate_systems.projections.projections import (
-     WGS84_A,  # Constants; Result types; Azimuthal Equidistant; UTM; Lambert Conformal Conic; Mercator; Stereographic; Transverse Mercator
+     WGS84_A,  # Constants; Result types
  )
  from pytcl.coordinate_systems.projections.projections import (
      WGS84_B,
pytcl/coordinate_systems/projections/projections.py CHANGED
@@ -25,7 +25,7 @@ References
      nanometers." Journal of Geodesy 85.8 (2011): 475-485.
  """
 
- from typing import NamedTuple, Optional, Tuple
+ from typing import Any, NamedTuple, Optional, Tuple
 
  import numpy as np
  from numpy.typing import NDArray
@@ -1253,7 +1253,7 @@ def geodetic2utm_batch(
      lats: NDArray[np.floating],
      lons: NDArray[np.floating],
      zone: Optional[int] = None,
- ) -> Tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.int_], NDArray]:
+ ) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.intp], NDArray[Any]]:
      """
      Batch convert geodetic coordinates to UTM.
 
pytcl/coordinate_systems/rotations/rotations.py CHANGED
@@ -6,7 +6,7 @@ representations including rotation matrices, quaternions, Euler angles,
  axis-angle, and Rodrigues parameters.
  """
 
- from typing import Tuple
+ from typing import Any, Tuple
 
  import numpy as np
  from numba import njit
@@ -14,7 +14,7 @@ from numpy.typing import ArrayLike, NDArray
 
 
  @njit(cache=True, fastmath=True)
- def _rotx_inplace(angle: float, R: np.ndarray) -> None:
+ def _rotx_inplace(angle: float, R: np.ndarray[Any, Any]) -> None:
      """JIT-compiled rotation about x-axis (fills existing matrix)."""
      c = np.cos(angle)
      s = np.sin(angle)
@@ -30,7 +30,7 @@ def _rotx_inplace(angle: float, R: np.ndarray) -> None:
 
 
  @njit(cache=True, fastmath=True)
- def _roty_inplace(angle: float, R: np.ndarray) -> None:
+ def _roty_inplace(angle: float, R: np.ndarray[Any, Any]) -> None:
      """JIT-compiled rotation about y-axis (fills existing matrix)."""
      c = np.cos(angle)
      s = np.sin(angle)
@@ -46,7 +46,7 @@ def _roty_inplace(angle: float, R: np.ndarray) -> None:
 
 
  @njit(cache=True, fastmath=True)
- def _rotz_inplace(angle: float, R: np.ndarray) -> None:
+ def _rotz_inplace(angle: float, R: np.ndarray[Any, Any]) -> None:
      """JIT-compiled rotation about z-axis (fills existing matrix)."""
      c = np.cos(angle)
      s = np.sin(angle)
@@ -62,7 +62,9 @@ def _rotz_inplace(angle: float, R: np.ndarray) -> None:
 
 
  @njit(cache=True, fastmath=True)
- def _euler_zyx_to_rotmat(yaw: float, pitch: float, roll: float, R: np.ndarray) -> None:
+ def _euler_zyx_to_rotmat(
+     yaw: float, pitch: float, roll: float, R: np.ndarray[Any, Any]
+ ) -> None:
      """JIT-compiled ZYX Euler angles to rotation matrix."""
      cy = np.cos(yaw)
      sy = np.sin(yaw)
@@ -84,7 +86,9 @@ def _euler_zyx_to_rotmat(yaw: float, pitch: float, roll: float, R: np.ndarray) -
 
 
  @njit(cache=True, fastmath=True)
- def _matmul_3x3(A: np.ndarray, B: np.ndarray, C: np.ndarray) -> None:
+ def _matmul_3x3(
+     A: np.ndarray[Any, Any], B: np.ndarray[Any, Any], C: np.ndarray[Any, Any]
+ ) -> None:
      """JIT-compiled 3x3 matrix multiplication C = A @ B."""
      for i in range(3):
          for j in range(3):
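The rotation hunks above only reflow signatures, but for orientation, the helpers they touch build the standard ZYX (yaw-pitch-roll) rotation. A plain-NumPy sketch of one common convention, R = Rz(yaw) @ Ry(pitch) @ Rx(roll); this is illustrative and may differ from the package's active/passive sign convention:

    import numpy as np


    def rotx(a: float) -> np.ndarray:
        c, s = np.cos(a), np.sin(a)
        return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])


    def roty(a: float) -> np.ndarray:
        c, s = np.cos(a), np.sin(a)
        return np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])


    def rotz(a: float) -> np.ndarray:
        c, s = np.cos(a), np.sin(a)
        return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])


    def euler_zyx_to_rotmat(yaw: float, pitch: float, roll: float) -> np.ndarray:
        # Compose intrinsic Z-Y-X rotations into a single 3x3 matrix.
        return rotz(yaw) @ roty(pitch) @ rotx(roll)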
pytcl/core/validation.py CHANGED
@@ -28,7 +28,7 @@ def validate_array(
      arr: ArrayLike,
      name: str = "array",
      *,
-     dtype: type | np.dtype | None = None,
+     dtype: type | np.dtype[Any] | None = None,
      ndim: int | tuple[int, ...] | None = None,
      shape: tuple[int | None, ...] | None = None,
      min_ndim: int | None = None,
@@ -415,7 +415,7 @@ def validate_same_shape(*arrays: ArrayLike, names: Sequence[str] | None = None)
 
  def validated_array_input(
      param_name: str,
      *,
-     dtype: type | np.dtype | None = None,
+     dtype: type | np.dtype[Any] | None = None,
      ndim: int | tuple[int, ...] | None = None,
      shape: tuple[int | None, ...] | None = None,
      finite: bool = False,
@@ -516,7 +516,7 @@ class ArraySpec:
      def __init__(
          self,
          *,
-         dtype: type | np.dtype | None = None,
+         dtype: type | np.dtype[Any] | None = None,
          ndim: int | tuple[int, ...] | None = None,
          shape: tuple[int | None, ...] | None = None,
          min_ndim: int | None = None,
pytcl/dynamic_estimation/__init__.py CHANGED
@@ -14,6 +14,14 @@ This module provides filtering and smoothing algorithms for state estimation:
  # Import submodules for easy access
  from pytcl.dynamic_estimation import kalman, particle_filters
 
+ # Gaussian Sum Filter
+ from pytcl.dynamic_estimation.gaussian_sum_filter import (
+     GaussianComponent,
+     GaussianSumFilter,
+     gaussian_sum_filter_predict,
+     gaussian_sum_filter_update,
+ )
+
  # IMM estimator
  from pytcl.dynamic_estimation.imm import (
      IMMEstimator,
@@ -25,14 +33,6 @@ from pytcl.dynamic_estimation.imm import (
      imm_update,
  )
 
- # Gaussian Sum Filter
- from pytcl.dynamic_estimation.gaussian_sum_filter import (
-     GaussianComponent,
-     GaussianSumFilter,
-     gaussian_sum_filter_predict,
-     gaussian_sum_filter_update,
- )
-
  # Information filter
  from pytcl.dynamic_estimation.information_filter import (
      InformationFilterResult,
@@ -93,14 +93,6 @@ from pytcl.dynamic_estimation.kalman import (
      unscented_transform,
  )
 
- # Rao-Blackwellized Particle Filter
- from pytcl.dynamic_estimation.rbpf import (
-     RBPFFilter,
-     RBPFParticle,
-     rbpf_predict,
-     rbpf_update,
- )
-
  # Particle filters
  from pytcl.dynamic_estimation.particle_filters import (
      ParticleState,
@@ -117,6 +109,14 @@ from pytcl.dynamic_estimation.particle_filters import (
      resample_systematic,
  )
 
+ # Rao-Blackwellized Particle Filter
+ from pytcl.dynamic_estimation.rbpf import (
+     RBPFFilter,
+     RBPFParticle,
+     rbpf_predict,
+     rbpf_update,
+ )
+
  # Smoothers
  from pytcl.dynamic_estimation.smoothers import (
      FixedLagResult,
pytcl/dynamic_estimation/gaussian_sum_filter.py CHANGED
@@ -31,8 +31,8 @@ from pytcl.dynamic_estimation.kalman.extended import ekf_predict, ekf_update
  class GaussianComponent(NamedTuple):
      """Single Gaussian component in mixture."""
 
-     x: NDArray  # State estimate
-     P: NDArray  # Covariance
+     x: NDArray[np.floating]  # State estimate
+     P: NDArray[np.floating]  # Covariance
      w: float  # Weight (probability)
 
 
@@ -108,15 +108,13 @@ class GaussianSumFilter:
                  x = x0.copy()
              else:
                  # Slight perturbation for diversity
-                 x = x0 + np.random.randn(x0.shape[0]) * np.sqrt(
-                     np.diag(P0)
-                 ) * 0.1
+                 x = x0 + np.random.randn(x0.shape[0]) * np.sqrt(np.diag(P0)) * 0.1
 
              self.components.append(GaussianComponent(x=x, P=P0.copy(), w=weight))
 
      def predict(
          self,
-         f: Callable,
+         f: Callable[[NDArray[np.floating]], NDArray[np.floating]],
          F: ArrayLike,
          Q: ArrayLike,
      ) -> None:
@@ -138,16 +136,14 @@ class GaussianSumFilter:
          for comp in self.components:
              # EKF predict for each component
              pred = ekf_predict(comp.x, comp.P, f, F, Q)
-             new_components.append(
-                 GaussianComponent(x=pred.x, P=pred.P, w=comp.w)
-             )
+             new_components.append(GaussianComponent(x=pred.x, P=pred.P, w=comp.w))
 
          self.components = new_components
 
      def update(
          self,
          z: ArrayLike,
-         h: Callable,
+         h: Callable[[NDArray[np.floating]], NDArray[np.floating]],
          H: ArrayLike,
          R: ArrayLike,
      ) -> None:
@@ -179,9 +175,7 @@ class GaussianSumFilter:
              # Likelihood from this measurement
              likelihood = upd.likelihood
 
-             updated_components.append(
-                 GaussianComponent(x=upd.x, P=upd.P, w=comp.w)
-             )
+             updated_components.append(GaussianComponent(x=upd.x, P=upd.P, w=comp.w))
              likelihoods.append(likelihood)
 
          # Adapt weights based on measurement likelihood
@@ -210,15 +204,11 @@ class GaussianSumFilter:
 
      def _prune_components(self) -> None:
          """Remove components with weight below threshold."""
-         self.components = [
-             c for c in self.components if c.w >= self.prune_threshold
-         ]
+         self.components = [c for c in self.components if c.w >= self.prune_threshold]
 
          if len(self.components) == 0:
              # Failsafe: keep best component
-             self.components = [
-                 max(self.components, key=lambda c: c.w)
-             ]
+             self.components = [max(self.components, key=lambda c: c.w)]
 
          # Renormalize weights
          total_weight = sum(c.w for c in self.components)
@@ -232,13 +222,11 @@ class GaussianSumFilter:
          """Merge similar components to keep count manageable."""
          while len(self.components) > self.max_components:
              # Find pair with smallest KL divergence
-             best_i, best_j, best_kl = 0, 1, float('inf')
+             best_i, best_j, best_kl = 0, 1, float("inf")
 
              for i in range(len(self.components)):
                  for j in range(i + 1, len(self.components)):
-                     kl = self._kl_divergence(
-                         self.components[i], self.components[j]
-                     )
+                     kl = self._kl_divergence(self.components[i], self.components[j])
                      if kl < best_kl:
                          best_kl = kl
                          best_i = best_j = i
@@ -261,8 +249,7 @@ class GaussianSumFilter:
              dx_i = ci.x - x_new
              dx_j = cj.x - x_new
              P_new += (
-                 ci.w * np.outer(dx_i, dx_i)
-                 + cj.w * np.outer(dx_j, dx_j)
+                 ci.w * np.outer(dx_i, dx_i) + cj.w * np.outer(dx_j, dx_j)
              ) / w_new
 
              # Create merged component
@@ -270,7 +257,8 @@ class GaussianSumFilter:
 
              # Replace with merged, remove old
              self.components = [
-                 c for i, c in enumerate(self.components)
+                 c
+                 for i, c in enumerate(self.components)
                  if i != best_i and i != best_j
              ]
              self.components.append(merged)
@@ -300,9 +288,7 @@ class GaussianSumFilter:
 
          try:
              P2_inv = np.linalg.inv(c2.P)
-             logdet_ratio = np.linalg.slogdet(c2.P)[1] - np.linalg.slogdet(
-                 c1.P
-             )[1]
+             logdet_ratio = np.linalg.slogdet(c2.P)[1] - np.linalg.slogdet(c1.P)[1]
 
              trace_term = np.trace(P2_inv @ c1.P)
              quad_term = dx @ P2_inv @ dx
@@ -313,7 +299,7 @@ class GaussianSumFilter:
              # Singular matrix, return large KL
              return 1e6
 
-     def estimate(self) -> tuple[NDArray, NDArray]:
+     def estimate(self) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
          """Get overall state estimate (weighted mean and covariance).
 
          Returns
@@ -356,7 +342,7 @@ class GaussianSumFilter:
 
  def gaussian_sum_filter_predict(
      components: List[GaussianComponent],
-     f: Callable,
+     f: Callable[[NDArray[np.floating]], NDArray[np.floating]],
      F: ArrayLike,
      Q: ArrayLike,
  ) -> List[GaussianComponent]:
@@ -384,9 +370,7 @@ def gaussian_sum_filter_predict(
      new_components = []
      for comp in components:
          pred = ekf_predict(comp.x, comp.P, f, F, Q)
-         new_components.append(
-             GaussianComponent(x=pred.x, P=pred.P, w=comp.w)
-         )
+         new_components.append(GaussianComponent(x=pred.x, P=pred.P, w=comp.w))
 
      return new_components
 
@@ -394,7 +378,7 @@ def gaussian_sum_filter_predict(
  def gaussian_sum_filter_update(
      components: List[GaussianComponent],
      z: ArrayLike,
-     h: Callable,
+     h: Callable[[NDArray[np.floating]], NDArray[np.floating]],
      H: ArrayLike,
      R: ArrayLike,
  ) -> List[GaussianComponent]:
@@ -429,9 +413,7 @@ def gaussian_sum_filter_update(
          upd = ekf_update(comp.x, comp.P, z, h, H, R)
          likelihood = upd.likelihood
 
-         updated_components.append(
-             GaussianComponent(x=upd.x, P=upd.P, w=comp.w)
-         )
+         updated_components.append(GaussianComponent(x=upd.x, P=upd.P, w=comp.w))
          likelihoods.append(likelihood)
 
      # Adapt weights
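Most of the gaussian_sum_filter.py hunks are line re-wrapping around the same prune/merge logic. As a self-contained illustration of the prune-and-renormalize step those hunks touch, here is a sketch under the assumption of the diff's GaussianComponent layout (this is not the package's code, and prune_threshold=1e-3 is an example value):

    from typing import List, NamedTuple

    import numpy as np
    from numpy.typing import NDArray


    class GaussianComponent(NamedTuple):
        x: NDArray[np.floating]  # state estimate
        P: NDArray[np.floating]  # covariance
        w: float                 # weight (probability)


    def prune_and_renormalize(
        components: List[GaussianComponent], prune_threshold: float = 1e-3
    ) -> List[GaussianComponent]:
        kept = [c for c in components if c.w >= prune_threshold]
        if not kept:
            # Failsafe: fall back to the single highest-weight component.
            kept = [max(components, key=lambda c: c.w)]
        total = sum(c.w for c in kept)
        return [c._replace(w=c.w / total) for c in kept]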