nrl-tracker 1.7.0__py3-none-any.whl → 1.7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/METADATA +43 -3
  2. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/RECORD +76 -76
  3. pytcl/__init__.py +2 -2
  4. pytcl/assignment_algorithms/__init__.py +15 -15
  5. pytcl/assignment_algorithms/gating.py +10 -10
  6. pytcl/assignment_algorithms/jpda.py +40 -40
  7. pytcl/assignment_algorithms/nd_assignment.py +5 -4
  8. pytcl/assignment_algorithms/network_flow.py +18 -8
  9. pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
  10. pytcl/astronomical/__init__.py +9 -9
  11. pytcl/astronomical/ephemerides.py +14 -11
  12. pytcl/astronomical/reference_frames.py +8 -4
  13. pytcl/astronomical/relativity.py +6 -5
  14. pytcl/astronomical/special_orbits.py +9 -13
  15. pytcl/atmosphere/__init__.py +6 -6
  16. pytcl/atmosphere/nrlmsise00.py +153 -152
  17. pytcl/clustering/dbscan.py +2 -2
  18. pytcl/clustering/gaussian_mixture.py +3 -3
  19. pytcl/clustering/hierarchical.py +15 -15
  20. pytcl/clustering/kmeans.py +4 -4
  21. pytcl/containers/base.py +3 -3
  22. pytcl/containers/cluster_set.py +12 -2
  23. pytcl/containers/covertree.py +5 -3
  24. pytcl/containers/rtree.py +1 -1
  25. pytcl/containers/vptree.py +4 -2
  26. pytcl/coordinate_systems/conversions/geodetic.py +31 -7
  27. pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
  28. pytcl/coordinate_systems/projections/__init__.py +1 -1
  29. pytcl/coordinate_systems/projections/projections.py +2 -2
  30. pytcl/coordinate_systems/rotations/rotations.py +10 -6
  31. pytcl/core/validation.py +3 -3
  32. pytcl/dynamic_estimation/__init__.py +16 -16
  33. pytcl/dynamic_estimation/gaussian_sum_filter.py +20 -38
  34. pytcl/dynamic_estimation/imm.py +14 -14
  35. pytcl/dynamic_estimation/kalman/__init__.py +1 -1
  36. pytcl/dynamic_estimation/kalman/constrained.py +35 -23
  37. pytcl/dynamic_estimation/kalman/extended.py +8 -8
  38. pytcl/dynamic_estimation/kalman/h_infinity.py +2 -2
  39. pytcl/dynamic_estimation/kalman/square_root.py +8 -2
  40. pytcl/dynamic_estimation/kalman/sr_ukf.py +3 -3
  41. pytcl/dynamic_estimation/kalman/ud_filter.py +11 -5
  42. pytcl/dynamic_estimation/kalman/unscented.py +8 -6
  43. pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
  44. pytcl/dynamic_estimation/rbpf.py +36 -40
  45. pytcl/gravity/spherical_harmonics.py +3 -3
  46. pytcl/gravity/tides.py +6 -6
  47. pytcl/logging_config.py +3 -3
  48. pytcl/magnetism/emm.py +10 -3
  49. pytcl/magnetism/wmm.py +4 -4
  50. pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
  51. pytcl/mathematical_functions/geometry/geometry.py +5 -5
  52. pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
  53. pytcl/mathematical_functions/signal_processing/detection.py +24 -24
  54. pytcl/mathematical_functions/signal_processing/filters.py +14 -14
  55. pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
  56. pytcl/mathematical_functions/special_functions/bessel.py +15 -3
  57. pytcl/mathematical_functions/special_functions/debye.py +5 -1
  58. pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
  59. pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
  60. pytcl/mathematical_functions/special_functions/hypergeometric.py +6 -4
  61. pytcl/mathematical_functions/transforms/fourier.py +8 -8
  62. pytcl/mathematical_functions/transforms/stft.py +12 -12
  63. pytcl/mathematical_functions/transforms/wavelets.py +9 -9
  64. pytcl/navigation/geodesy.py +3 -3
  65. pytcl/navigation/great_circle.py +5 -5
  66. pytcl/plotting/coordinates.py +7 -7
  67. pytcl/plotting/tracks.py +2 -2
  68. pytcl/static_estimation/maximum_likelihood.py +16 -14
  69. pytcl/static_estimation/robust.py +5 -5
  70. pytcl/terrain/loaders.py +5 -5
  71. pytcl/trackers/hypothesis.py +1 -1
  72. pytcl/trackers/mht.py +9 -9
  73. pytcl/trackers/multi_target.py +1 -1
  74. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/LICENSE +0 -0
  75. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/WHEEL +0 -0
  76. {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.3.dist-info}/top_level.txt +0 -0
pytcl/dynamic_estimation/rbpf.py CHANGED
@@ -23,7 +23,7 @@ References:
  (IEEE SPM, 2004)
  """

- from typing import Callable, NamedTuple
+ from typing import Any, Callable, NamedTuple

  import numpy as np
  from numpy.typing import NDArray
@@ -46,9 +46,9 @@ class RBPFParticle(NamedTuple):
  Particle weight (typically normalized to sum to 1)
  """

- y: NDArray
- x: NDArray
- P: NDArray
+ y: NDArray[Any]
+ x: NDArray[Any]
+ P: NDArray[Any]
  w: float


@@ -96,9 +96,9 @@ class RBPFFilter:

  def initialize(
  self,
- y0: NDArray,
- x0: NDArray,
- P0: NDArray,
+ y0: NDArray[Any],
+ x0: NDArray[Any],
+ P0: NDArray[Any],
  num_particles: int = 100,
  ) -> None:
  """Initialize particles.
@@ -132,12 +132,12 @@ class RBPFFilter:

  def predict(
  self,
- g: Callable[[NDArray], NDArray],
- G: NDArray,
- Qy: NDArray,
- f: Callable[[NDArray, NDArray], NDArray],
- F: NDArray,
- Qx: NDArray,
+ g: Callable[[NDArray[Any]], NDArray[Any]],
+ G: NDArray[Any],
+ Qy: NDArray[Any],
+ f: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
+ F: NDArray[Any],
+ Qx: NDArray[Any],
  ) -> None:
  """Predict step: propagate particles and linear states.

@@ -167,7 +167,7 @@ class RBPFFilter:
  )

  # Create wrapper for linear dynamics with current y_pred
- def f_wrapper(x):
+ def f_wrapper(x: NDArray[Any]) -> NDArray[Any]:
  return f(x, y_pred)

  # Predict linear component using EKF
@@ -185,10 +185,10 @@ class RBPFFilter:

  def update(
  self,
- z: NDArray,
- h: Callable[[NDArray, NDArray], NDArray],
- H: NDArray,
- R: NDArray,
+ z: NDArray[Any],
+ h: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
+ H: NDArray[Any],
+ R: NDArray[Any],
  ) -> None:
  """Update step: adapt particle weights based on measurement.

@@ -208,7 +208,7 @@ class RBPFFilter:

  for i, particle in enumerate(self.particles):
  # Create wrapper for measurement function with current y
- def h_wrapper(x):
+ def h_wrapper(x: NDArray[Any]) -> NDArray[Any]:
  return h(x, particle.y)

  # Update linear component (Kalman update)
@@ -248,7 +248,7 @@ class RBPFFilter:
  # Merge if too many particles
  self._merge_particles()

- def estimate(self) -> tuple[NDArray, NDArray]:
+ def estimate(self) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]:
  """Estimate state as weighted mean and covariance.

  Returns
@@ -417,13 +417,12 @@ class RBPFFilter:
  w_sum = sum(p.w for p in self.particles)
  if w_sum > 0:
  self.particles = [
- RBPFParticle(y=p.y, x=p.x, P=p.P, w=p.w / w_sum)
- for p in self.particles
+ RBPFParticle(y=p.y, x=p.x, P=p.P, w=p.w / w_sum) for p in self.particles
  ]

  @staticmethod
  def _kl_divergence(
- P1: NDArray, P2: NDArray, x1: NDArray, x2: NDArray
+ P1: NDArray[Any], P2: NDArray[Any], x1: NDArray[Any], x2: NDArray[Any]
  ) -> float:
  """Compute KL divergence between two Gaussians.

@@ -472,12 +471,12 @@ class RBPFFilter:

  def rbpf_predict(
  particles: list[RBPFParticle],
- g: Callable[[NDArray], NDArray],
- G: NDArray,
- Qy: NDArray,
- f: Callable[[NDArray, NDArray], NDArray],
- F: NDArray,
- Qx: NDArray,
+ g: Callable[[NDArray[Any]], NDArray[Any]],
+ G: NDArray[Any],
+ Qy: NDArray[Any],
+ f: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
+ F: NDArray[Any],
+ Qx: NDArray[Any],
  ) -> list[RBPFParticle]:
  """Predict step for RBPF particles.

@@ -508,12 +507,10 @@ def rbpf_predict(
  for particle in particles:
  # Predict nonlinear component
  y_pred = g(particle.y)
- y_pred = y_pred + np.random.multivariate_normal(
- np.zeros(y_pred.shape[0]), Qy
- )
+ y_pred = y_pred + np.random.multivariate_normal(np.zeros(y_pred.shape[0]), Qy)

  # Create wrapper for linear dynamics with current y_pred
- def f_wrapper(x):
+ def f_wrapper(x: NDArray[Any]) -> NDArray[Any]:
  return f(x, y_pred)

  # Predict linear component
@@ -532,10 +529,10 @@ def rbpf_predict(

  def rbpf_update(
  particles: list[RBPFParticle],
- z: NDArray,
- h: Callable[[NDArray, NDArray], NDArray],
- H: NDArray,
- R: NDArray,
+ z: NDArray[Any],
+ h: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
+ H: NDArray[Any],
+ R: NDArray[Any],
  ) -> list[RBPFParticle]:
  """Update step for RBPF particles.

@@ -562,7 +559,7 @@ def rbpf_update(

  for i, particle in enumerate(particles):
  # Create wrapper for measurement function with current y
- def h_wrapper(x):
+ def h_wrapper(x: NDArray[Any]) -> NDArray[Any]:
  return h(x, particle.y)

  # Update linear component
@@ -588,6 +585,5 @@ def rbpf_update(

  # Update with new weights
  return [
- RBPFParticle(y=p.y, x=p.x, P=p.P, w=w)
- for p, w in zip(new_particles, weights)
+ RBPFParticle(y=p.y, x=p.x, P=p.P, w=w) for p, w in zip(new_particles, weights)
  ]
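The rbpf.py change is almost entirely mechanical: every bare NDArray becomes NDArray[Any], and the nested f_wrapper/h_wrapper closures gain parameter and return annotations. The sketch below shows the annotated pattern in isolation; normalize_weights and make_f_wrapper are illustrative names, not functions from the package, and the field comments reflect my reading of the Rao-Blackwellized particle split.

from typing import Any, Callable, NamedTuple

from numpy.typing import NDArray


class RBPFParticle(NamedTuple):
    y: NDArray[Any]  # nonlinear state, sampled per particle
    x: NDArray[Any]  # conditionally linear state mean
    P: NDArray[Any]  # conditionally linear state covariance
    w: float         # particle weight


def normalize_weights(particles: list[RBPFParticle]) -> list[RBPFParticle]:
    # Same comprehension shape as the reformatted line in the diff.
    w_sum = sum(p.w for p in particles)
    if w_sum <= 0:
        return particles
    return [RBPFParticle(y=p.y, x=p.x, P=p.P, w=p.w / w_sum) for p in particles]


def make_f_wrapper(
    f: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
    y_pred: NDArray[Any],
) -> Callable[[NDArray[Any]], NDArray[Any]]:
    # Bind the predicted nonlinear state into the linear dynamics, fully annotated,
    # mirroring the f_wrapper closures shown in the diff.
    def f_wrapper(x: NDArray[Any]) -> NDArray[Any]:
        return f(x, y_pred)

    return f_wrapper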
pytcl/gravity/spherical_harmonics.py CHANGED
@@ -13,7 +13,7 @@ References

  import logging
  from functools import lru_cache
- from typing import Optional, Tuple
+ from typing import Any, Optional, Tuple

  import numpy as np
  from numpy.typing import NDArray
@@ -37,7 +37,7 @@ def _associated_legendre_cached(
  m_max: int,
  x_quantized: float,
  normalized: bool,
- ) -> tuple:
+ ) -> tuple[tuple[np.ndarray[Any, Any], ...], ...]:
  """Cached Legendre polynomial computation (internal).

  Returns tuple of tuples for hashability.
@@ -543,7 +543,7 @@ def clear_legendre_cache() -> None:
  _logger.debug("Legendre polynomial cache cleared")


- def get_legendre_cache_info():
+ def get_legendre_cache_info() -> Any:
  """Get cache statistics for Legendre polynomials.

  Returns
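The _associated_legendre_cached change pins down a previously bare tuple return type: the function is lru_cache'd, so its arguments (a quantized float) and its return value (nested tuples rather than arrays) must be hashable. The toy sketch below reproduces only that caching and annotation pattern, not pytcl's Legendre recursion; _cached_rows and as_array are hypothetical names.

from functools import lru_cache

import numpy as np
from numpy.typing import NDArray


@lru_cache(maxsize=128)
def _cached_rows(n_max: int, x_quantized: float) -> tuple[tuple[float, ...], ...]:
    # Nested tuples are hashable, so lru_cache can store the result directly.
    return tuple(
        tuple(x_quantized**k for k in range(m + 1)) for m in range(n_max + 1)
    )


def as_array(n_max: int, x: float) -> NDArray[np.float64]:
    # Quantize the argument so nearby floats map to the same cache entry.
    rows = _cached_rows(n_max, round(x, 6))
    out = np.zeros((n_max + 1, n_max + 1))
    for m, row in enumerate(rows):
        out[m, : len(row)] = row  # ragged rows padded into a dense array
    return out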
pytcl/gravity/tides.py CHANGED
@@ -77,9 +77,9 @@ class OceanTideLoading(NamedTuple):
  Names of tidal constituents.
  """

- amplitude: NDArray
- phase: NDArray
- constituents: Tuple[str, ...]
+ amplitude: NDArray[np.floating]
+ phase: NDArray[np.floating]
+ constituents: tuple[str, ...]


  # Love and Shida numbers for degree 2 (IERS 2010)
@@ -593,9 +593,9 @@ def solid_earth_tide_gravity(

  def ocean_tide_loading_displacement(
  mjd: float,
- amplitude: NDArray,
- phase: NDArray,
- constituents: Tuple[str, ...] = ("M2", "S2", "N2", "K2", "K1", "O1", "P1", "Q1"),
+ amplitude: NDArray[np.floating],
+ phase: NDArray[np.floating],
+ constituents: tuple[str, ...] = ("M2", "S2", "N2", "K2", "K1", "O1", "P1", "Q1"),
  ) -> TidalDisplacement:
  """
  Compute ocean tide loading displacement.
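Here the float-valued arrays gain the narrower NDArray[np.floating] parameterization and the deprecated typing.Tuple spelling moves to the builtin tuple[...]. A minimal sketch of constructing the annotated NamedTuple; the zero arrays are placeholders and the (components x constituents) shape is an assumption, not taken from the package.

from typing import NamedTuple

import numpy as np
from numpy.typing import NDArray


class OceanTideLoading(NamedTuple):
    amplitude: NDArray[np.floating]  # loading amplitudes (placeholder values below)
    phase: NDArray[np.floating]      # phases in degrees (placeholder values below)
    constituents: tuple[str, ...]    # tidal constituent names


# Placeholder data only; shape (3 displacement components x 8 constituents) is assumed.
loading = OceanTideLoading(
    amplitude=np.zeros((3, 8)),
    phase=np.zeros((3, 8)),
    constituents=("M2", "S2", "N2", "K2", "K1", "O1", "P1", "Q1"),
)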
pytcl/logging_config.py CHANGED
@@ -26,7 +26,7 @@ import functools
  import logging
  import time
  from contextlib import contextmanager
- from typing import Any, Callable, Optional, TypeVar
+ from typing import Any, Callable, Generator, Optional, TypeVar

  # Type variable for decorated functions
  F = TypeVar("F", bound=Callable[..., Any])
@@ -201,7 +201,7 @@ def TimingContext(
  logger: Optional[logging.Logger] = None,
  name: str = "operation",
  level: int = logging.DEBUG,
- ):
+ ) -> Generator[None, None, None]:
  """
  Context manager for timing code blocks.

@@ -268,7 +268,7 @@ class PerformanceTracker:
  self.max_ms = 0.0

  @contextmanager
- def track(self):
+ def track(self) -> Generator[None, None, None]:
  """Track a single operation."""
  start = time.perf_counter()
  try:
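Both TimingContext and PerformanceTracker.track are generator-based context managers that yield exactly once and return nothing, which is what the new Generator[None, None, None] annotation states. A standalone sketch of the same pattern; the timing_context name below is illustrative, not the library's function.

import logging
import time
from contextlib import contextmanager
from typing import Generator, Optional


@contextmanager
def timing_context(
    logger: Optional[logging.Logger] = None,
    name: str = "operation",
    level: int = logging.DEBUG,
) -> Generator[None, None, None]:
    """Log how long the wrapped block took, in milliseconds."""
    start = time.perf_counter()
    try:
        yield  # yields None once, returns None: hence Generator[None, None, None]
    finally:
        elapsed_ms = (time.perf_counter() - start) * 1000.0
        (logger or logging.getLogger(__name__)).log(level, "%s took %.3f ms", name, elapsed_ms)


# Usage: the body runs inside the timed block.
with timing_context(name="demo"):
    sum(range(1000))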
pytcl/magnetism/emm.py CHANGED
@@ -24,7 +24,7 @@ References
  import os
  from functools import lru_cache
  from pathlib import Path
- from typing import Dict, NamedTuple, Optional, Tuple
+ from typing import Any, NamedTuple, Optional, Tuple

  import numpy as np
  from numpy.typing import NDArray
@@ -32,7 +32,7 @@ from numpy.typing import NDArray
  from .wmm import MagneticResult

  # Model parameters
- EMM_PARAMETERS: Dict[str, Dict] = {
+ EMM_PARAMETERS: dict[str, dict[str, Any]] = {
  "EMM2017": {
  "n_max": 790,
  "epoch": 2017.0,
@@ -119,7 +119,7 @@ def _ensure_data_dir() -> Path:
  def parse_emm_file(
  filepath: Path,
  n_max: Optional[int] = None,
- ) -> Tuple[NDArray, NDArray, NDArray, NDArray, float, int]:
+ ) -> tuple[
+ NDArray[np.floating],
+ NDArray[np.floating],
+ NDArray[np.floating],
+ NDArray[np.floating],
+ float,
+ int,
+ ]:
  """Parse an EMM/WMMHR coefficient file.

  The file format is similar to WMM but with more coefficients:
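The one-line Tuple[NDArray, ...] return type becomes an explicit six-element tuple of typed arrays plus a float and an int. The skeleton below mirrors only the shape of that signature; reading the four arrays as main-field coefficients and their secular variations, and the float/int as epoch and maximum degree, is my assumption, and the body is a placeholder rather than the real parser.

from pathlib import Path
from typing import Optional

import numpy as np
from numpy.typing import NDArray


def parse_emm_like_file(
    filepath: Path,
    n_max: Optional[int] = None,
) -> tuple[
    NDArray[np.floating],  # assumed: g coefficients
    NDArray[np.floating],  # assumed: h coefficients
    NDArray[np.floating],  # assumed: secular variation of g
    NDArray[np.floating],  # assumed: secular variation of h
    float,                 # assumed: epoch (decimal year)
    int,                   # assumed: maximum degree read
]:
    # Placeholder body: returns zero arrays sized by n_max instead of parsing filepath.
    n = n_max if n_max is not None else 12
    shape = (n + 1, n + 1)
    return np.zeros(shape), np.zeros(shape), np.zeros(shape), np.zeros(shape), 2017.0, n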
pytcl/magnetism/wmm.py CHANGED
@@ -13,7 +13,7 @@ References
  """

  from functools import lru_cache
- from typing import NamedTuple, Optional, Tuple
+ from typing import Any, NamedTuple, Optional, Tuple

  import numpy as np
  from numpy.typing import NDArray
@@ -482,7 +482,7 @@ def _magnetic_field_spherical_cached(


  # Registry to hold coefficient sets by id
- _coefficient_registry: dict = {}
+ _coefficient_registry: dict[str, Any] = {}


  def _register_coefficients(coeffs: "MagneticCoefficients") -> int:
@@ -579,7 +579,7 @@ def _compute_magnetic_field_spherical_impl(
  # =============================================================================


- def get_magnetic_cache_info() -> dict:
+ def get_magnetic_cache_info() -> dict[str, Any]:
  """
  Get information about the magnetic field computation cache.

@@ -630,7 +630,7 @@ def clear_magnetic_cache() -> None:

  def configure_magnetic_cache(
  maxsize: Optional[int] = None,
- precision: Optional[dict] = None,
+ precision: Optional[dict[str, Any]] = None,
  ) -> None:
  """
  Configure the magnetic field computation cache.
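wmm.py follows the same recipe: bare dict annotations become dict[str, Any] for the coefficient registry, the cache-info report, and the precision options. A small self-contained sketch of that style; the registry layout and helper names here are illustrative, not the module's actual internals.

from typing import Any

# Hypothetical registry using the same dict[str, Any] parameterization as the diff.
_registry: dict[str, Any] = {}


def register_model(name: str, coeffs: Any) -> None:
    """Store a coefficient set under a string key (illustrative layout only)."""
    _registry[name] = coeffs


def get_cache_info() -> dict[str, Any]:
    """Report registry contents in the same dict[str, Any] style as get_magnetic_cache_info."""
    return {"entries": len(_registry), "keys": sorted(_registry)}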
pytcl/mathematical_functions/combinatorics/combinatorics.py CHANGED
@@ -7,7 +7,7 @@ related operations commonly used in assignment problems and data association.

  import itertools
  from functools import lru_cache
- from typing import Iterator, List, Optional, Tuple
+ from typing import Any, Iterator, List, Optional, Tuple

  from numpy.typing import ArrayLike

@@ -108,7 +108,7 @@ def n_permute_k(n: int, k: int) -> int:
  def permutations(
  items: ArrayLike,
  k: Optional[int] = None,
- ) -> Iterator[Tuple]:
+ ) -> Iterator[tuple[Any, ...]]:
  """
  Generate all k-permutations of items.

@@ -136,7 +136,7 @@ def permutations(
  def combinations(
  items: ArrayLike,
  k: int,
- ) -> Iterator[Tuple]:
+ ) -> Iterator[tuple[Any, ...]]:
  """
  Generate all k-combinations of items.

@@ -164,7 +164,7 @@ def combinations(
  def combinations_with_replacement(
  items: ArrayLike,
  k: int,
- ) -> Iterator[Tuple]:
+ ) -> Iterator[tuple[Any, ...]]:
  """
  Generate all k-combinations with replacement.

@@ -263,7 +263,7 @@ def permutation_unrank(rank: int, n: int) -> List[int]:
  return perm


- def next_permutation(perm: ArrayLike) -> Optional[List]:
+ def next_permutation(perm: ArrayLike) -> Optional[List[Any]]:
  """
  Generate the next permutation in lexicographic order.
 
pytcl/mathematical_functions/geometry/geometry.py CHANGED
@@ -5,7 +5,7 @@ This module provides geometric functions for points, lines, planes,
  polygons, and related operations used in tracking applications.
  """

- from typing import Optional, Tuple
+ from typing import Any, Optional, Tuple

  import numpy as np
  from numpy.typing import ArrayLike, NDArray
@@ -527,12 +527,12 @@ def minimum_bounding_circle(
  """
  points = np.asarray(points, dtype=np.float64)

- def circle_from_two_points(p1, p2):
+ def circle_from_two_points(p1: Any, p2: Any) -> tuple[Any, Any]:
  center = (p1 + p2) / 2
  radius = np.linalg.norm(p1 - center)
  return center, radius

- def circle_from_three_points(p1, p2, p3):
+ def circle_from_three_points(p1: Any, p2: Any, p3: Any) -> tuple[Any, Any]:
  ax, ay = p1
  bx, by = p2
  cx, cy = p3
@@ -565,10 +565,10 @@ def minimum_bounding_circle(
  radius = np.linalg.norm(p1 - center)
  return center, radius

- def is_inside(c, r, p):
+ def is_inside(c: Any, r: Any, p: Any) -> Any:
  return np.linalg.norm(p - c) <= r + 1e-10

- def welzl(P, R):
+ def welzl(P: Any, R: Any) -> tuple[Any, Any]:
  if len(P) == 0 or len(R) == 3:
  if len(R) == 0:
  return np.array([0.0, 0.0]), 0.0
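Inside minimum_bounding_circle, the previously unannotated nested helpers get Any parameters and tuple[Any, Any] returns, kept loose because they pass numpy arrays and scalars around. The sketch below lifts two of those helpers into a standalone function to show the annotated form; circumscribe_two_points is a hypothetical wrapper, while the helper bodies match the lines visible in the diff.

from typing import Any

import numpy as np
from numpy.typing import NDArray


def circumscribe_two_points(points: NDArray[np.float64]) -> tuple[Any, Any]:
    """Smallest circle through the first two rows of `points` (illustrative wrapper)."""

    def circle_from_two_points(p1: Any, p2: Any) -> tuple[Any, Any]:
        # Midpoint center, half-distance radius, as in the library helper.
        center = (p1 + p2) / 2
        radius = np.linalg.norm(p1 - center)
        return center, radius

    def is_inside(c: Any, r: Any, p: Any) -> Any:
        # NumPy comparison returns np.bool_, hence the loose Any return.
        return np.linalg.norm(p - c) <= r + 1e-10

    center, radius = circle_from_two_points(points[0], points[1])
    assert is_inside(center, radius, points[0])
    return center, radius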
pytcl/mathematical_functions/numerical_integration/quadrature.py CHANGED
@@ -5,7 +5,7 @@ This module provides Gaussian quadrature rules and numerical integration
  functions commonly used in state estimation and filtering.
  """

- from typing import Callable, Literal, Optional, Tuple
+ from typing import Any, Callable, Literal, Optional, Tuple

  import numpy as np
  import scipy.integrate as integrate
@@ -158,7 +158,7 @@ def quad(
  f: Callable[[float], float],
  a: float,
  b: float,
- **kwargs,
+ **kwargs: Any,
  ) -> Tuple[float, float]:
  """
  Adaptive quadrature integration.
@@ -203,7 +203,7 @@ def dblquad(
  b: float,
  gfun: Callable[[float], float],
  hfun: Callable[[float], float],
- **kwargs,
+ **kwargs: Any,
  ) -> Tuple[float, float]:
  """
  Double integration.
@@ -248,7 +248,7 @@ def tplquad(
  hfun: Callable[[float], float],
  qfun: Callable[[float, float], float],
  rfun: Callable[[float, float], float],
- **kwargs,
+ **kwargs: Any,
  ) -> Tuple[float, float]:
  """
  Triple integration.
@@ -290,11 +290,11 @@ def tplquad(


  def fixed_quad(
- f: Callable[[NDArray], NDArray],
+ f: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
  a: float,
  b: float,
  n: int = 5,
- ) -> Tuple[float, None]:
+ ) -> tuple[float, None]:
  """
  Fixed-order Gaussian quadrature.
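For the scipy wrappers the only change is annotating the pass-through **kwargs as Any and tightening fixed_quad's callable type. A minimal sketch of such a wrapper, forwarding keyword arguments straight to scipy.integrate.quad; the wrapper itself is illustrative, not pytcl's quad.

from typing import Any, Callable, Tuple

import scipy.integrate as integrate


def quad(f: Callable[[float], float], a: float, b: float, **kwargs: Any) -> Tuple[float, float]:
    """Adaptive quadrature returning (value, estimated absolute error)."""
    value, abserr = integrate.quad(f, a, b, **kwargs)
    return value, abserr


# Usage: integrate x^2 over [0, 1]; epsabs is forwarded through **kwargs to scipy.
print(quad(lambda x: x * x, 0.0, 1.0, epsabs=1e-10))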
 
pytcl/mathematical_functions/signal_processing/detection.py CHANGED
@@ -24,7 +24,7 @@ References
  Systems, 19(4), 608-621.
  """

- from typing import NamedTuple, Optional
+ from typing import Any, NamedTuple, Optional

  import numpy as np
  from numba import njit, prange
@@ -214,12 +214,12 @@ def detection_probability(

  @njit(cache=True, fastmath=True)
  def _cfar_ca_kernel(
- signal: np.ndarray,
+ signal: np.ndarray[Any, Any],
  guard_cells: int,
  ref_cells: int,
  alpha: float,
- noise_estimate: np.ndarray,
- threshold: np.ndarray,
+ noise_estimate: np.ndarray[Any, Any],
+ threshold: np.ndarray[Any, Any],
  ) -> None:
  """JIT-compiled CA-CFAR kernel."""
  n = len(signal)
@@ -252,12 +252,12 @@ def _cfar_ca_kernel(

  @njit(cache=True, fastmath=True)
  def _cfar_go_kernel(
- signal: np.ndarray,
+ signal: np.ndarray[Any, Any],
  guard_cells: int,
  ref_cells: int,
  alpha: float,
- noise_estimate: np.ndarray,
- threshold: np.ndarray,
+ noise_estimate: np.ndarray[Any, Any],
+ threshold: np.ndarray[Any, Any],
  ) -> None:
  """JIT-compiled GO-CFAR kernel."""
  n = len(signal)
@@ -290,12 +290,12 @@ def _cfar_go_kernel(

  @njit(cache=True, fastmath=True)
  def _cfar_so_kernel(
- signal: np.ndarray,
+ signal: np.ndarray[Any, Any],
  guard_cells: int,
  ref_cells: int,
  alpha: float,
- noise_estimate: np.ndarray,
- threshold: np.ndarray,
+ noise_estimate: np.ndarray[Any, Any],
+ threshold: np.ndarray[Any, Any],
  ) -> None:
  """JIT-compiled SO-CFAR kernel."""
  n = len(signal)
@@ -331,13 +331,13 @@ def _cfar_so_kernel(

  @njit(cache=True, fastmath=True)
  def _cfar_os_kernel(
- signal: np.ndarray,
+ signal: np.ndarray[Any, Any],
  guard_cells: int,
  ref_cells: int,
  k: int,
  alpha: float,
- noise_estimate: np.ndarray,
- threshold: np.ndarray,
+ noise_estimate: np.ndarray[Any, Any],
+ threshold: np.ndarray[Any, Any],
  ) -> None:
  """JIT-compiled OS-CFAR kernel."""
  n = len(signal)
@@ -378,14 +378,14 @@ def _cfar_os_kernel(

  @njit(cache=True, fastmath=True, parallel=True)
  def _cfar_2d_ca_kernel(
- image: np.ndarray,
+ image: np.ndarray[Any, Any],
  guard_rows: int,
  guard_cols: int,
  ref_rows: int,
  ref_cols: int,
  alpha: float,
- noise_estimate: np.ndarray,
- threshold: np.ndarray,
+ noise_estimate: np.ndarray[Any, Any],
+ threshold: np.ndarray[Any, Any],
  ) -> None:
  """JIT-compiled 2D CA-CFAR kernel with parallel execution."""
  n_rows, n_cols = image.shape
@@ -426,14 +426,14 @@ def _cfar_2d_ca_kernel(

  @njit(cache=True, fastmath=True, parallel=True)
  def _cfar_2d_go_kernel(
- image: np.ndarray,
+ image: np.ndarray[Any, Any],
  guard_rows: int,
  guard_cols: int,
  ref_rows: int,
  ref_cols: int,
  alpha: float,
- noise_estimate: np.ndarray,
- threshold: np.ndarray,
+ noise_estimate: np.ndarray[Any, Any],
+ threshold: np.ndarray[Any, Any],
  ) -> None:
  """JIT-compiled 2D GO-CFAR kernel with parallel execution."""
  n_rows, n_cols = image.shape
@@ -478,14 +478,14 @@ def _cfar_2d_go_kernel(

  @njit(cache=True, fastmath=True, parallel=True)
  def _cfar_2d_so_kernel(
- image: np.ndarray,
+ image: np.ndarray[Any, Any],
  guard_rows: int,
  guard_cols: int,
  ref_rows: int,
  ref_cols: int,
  alpha: float,
- noise_estimate: np.ndarray,
- threshold: np.ndarray,
+ noise_estimate: np.ndarray[Any, Any],
+ threshold: np.ndarray[Any, Any],
  ) -> None:
  """JIT-compiled 2D SO-CFAR kernel with parallel execution."""
  n_rows, n_cols = image.shape
@@ -830,8 +830,8 @@ def cfar_os(

  def cfar_2d(
  image: ArrayLike,
- guard_cells: tuple,
- ref_cells: tuple,
+ guard_cells: tuple[int, int],
+ ref_cells: tuple[int, int],
  pfa: float = 1e-6,
  method: str = "ca",
  alpha: Optional[float] = None,
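All of the CFAR kernels take preallocated noise_estimate and threshold output arrays, return None, and now annotate every array parameter as np.ndarray[Any, Any]; the annotations serve static checkers, while numba compiles from the runtime argument types. The sketch below keeps that signature shape but uses a simplified pure-Python cell-averaging body, so it is illustrative rather than the library's JIT-compiled kernel.

from typing import Any

import numpy as np


def cfar_ca_sketch(
    signal: np.ndarray[Any, Any],
    guard_cells: int,
    ref_cells: int,
    alpha: float,
    noise_estimate: np.ndarray[Any, Any],
    threshold: np.ndarray[Any, Any],
) -> None:
    """Simplified CA-CFAR: average reference cells, skipping the cell under test and guards."""
    n = len(signal)
    for i in range(n):
        lo = max(0, i - guard_cells - ref_cells)
        hi = min(n, i + guard_cells + ref_cells + 1)
        total = 0.0
        count = 0
        for j in range(lo, hi):
            if abs(j - i) > guard_cells:  # exclude cell under test and guard cells
                total += float(signal[j])
                count += 1
        noise_estimate[i] = total / count if count else 0.0
        threshold[i] = alpha * noise_estimate[i]


# Usage: outputs are filled in place, matching the -> None kernels in the diff.
sig = np.abs(np.random.default_rng(0).normal(size=64))
noise = np.zeros_like(sig)
thresh = np.zeros_like(sig)
cfar_ca_sketch(sig, guard_cells=2, ref_cells=8, alpha=3.0, noise_estimate=noise, threshold=thresh)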