nrl-tracker 1.6.0__py3-none-any.whl → 1.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/METADATA +14 -10
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/RECORD +75 -68
- pytcl/__init__.py +2 -2
- pytcl/assignment_algorithms/__init__.py +28 -0
- pytcl/assignment_algorithms/gating.py +10 -10
- pytcl/assignment_algorithms/jpda.py +40 -40
- pytcl/assignment_algorithms/nd_assignment.py +379 -0
- pytcl/assignment_algorithms/network_flow.py +371 -0
- pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
- pytcl/astronomical/__init__.py +35 -0
- pytcl/astronomical/ephemerides.py +14 -11
- pytcl/astronomical/reference_frames.py +110 -4
- pytcl/astronomical/relativity.py +6 -5
- pytcl/astronomical/special_orbits.py +532 -0
- pytcl/atmosphere/__init__.py +11 -0
- pytcl/atmosphere/nrlmsise00.py +809 -0
- pytcl/clustering/dbscan.py +2 -2
- pytcl/clustering/gaussian_mixture.py +3 -3
- pytcl/clustering/hierarchical.py +15 -15
- pytcl/clustering/kmeans.py +4 -4
- pytcl/containers/base.py +3 -3
- pytcl/containers/cluster_set.py +12 -2
- pytcl/containers/covertree.py +5 -3
- pytcl/containers/rtree.py +1 -1
- pytcl/containers/vptree.py +4 -2
- pytcl/coordinate_systems/conversions/geodetic.py +272 -5
- pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
- pytcl/coordinate_systems/projections/projections.py +2 -2
- pytcl/coordinate_systems/rotations/rotations.py +10 -6
- pytcl/core/validation.py +3 -3
- pytcl/dynamic_estimation/__init__.py +26 -0
- pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
- pytcl/dynamic_estimation/imm.py +14 -14
- pytcl/dynamic_estimation/kalman/__init__.py +12 -0
- pytcl/dynamic_estimation/kalman/constrained.py +382 -0
- pytcl/dynamic_estimation/kalman/extended.py +8 -8
- pytcl/dynamic_estimation/kalman/h_infinity.py +2 -2
- pytcl/dynamic_estimation/kalman/square_root.py +8 -2
- pytcl/dynamic_estimation/kalman/sr_ukf.py +3 -3
- pytcl/dynamic_estimation/kalman/ud_filter.py +11 -5
- pytcl/dynamic_estimation/kalman/unscented.py +8 -6
- pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
- pytcl/dynamic_estimation/rbpf.py +589 -0
- pytcl/gravity/spherical_harmonics.py +3 -3
- pytcl/gravity/tides.py +6 -6
- pytcl/logging_config.py +3 -3
- pytcl/magnetism/emm.py +10 -3
- pytcl/magnetism/wmm.py +4 -4
- pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
- pytcl/mathematical_functions/geometry/geometry.py +5 -5
- pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
- pytcl/mathematical_functions/signal_processing/detection.py +24 -24
- pytcl/mathematical_functions/signal_processing/filters.py +14 -14
- pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
- pytcl/mathematical_functions/special_functions/bessel.py +15 -3
- pytcl/mathematical_functions/special_functions/debye.py +5 -1
- pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
- pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
- pytcl/mathematical_functions/special_functions/hypergeometric.py +6 -4
- pytcl/mathematical_functions/transforms/fourier.py +8 -8
- pytcl/mathematical_functions/transforms/stft.py +12 -12
- pytcl/mathematical_functions/transforms/wavelets.py +9 -9
- pytcl/navigation/geodesy.py +3 -3
- pytcl/navigation/great_circle.py +5 -5
- pytcl/plotting/coordinates.py +7 -7
- pytcl/plotting/tracks.py +2 -2
- pytcl/static_estimation/maximum_likelihood.py +16 -14
- pytcl/static_estimation/robust.py +5 -5
- pytcl/terrain/loaders.py +5 -5
- pytcl/trackers/hypothesis.py +1 -1
- pytcl/trackers/mht.py +9 -9
- pytcl/trackers/multi_target.py +1 -1
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/top_level.txt +0 -0
@@ -22,7 +22,7 @@ References
    Speech, and Signal Processing, 32(2), 236-243.
 """
 
-from typing import NamedTuple, Optional, Union
+from typing import Any, NamedTuple, Optional, Union
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -77,7 +77,7 @@ class Spectrogram(NamedTuple):
 
 
 def get_window(
-    window: Union[str, tuple, ArrayLike],
+    window: Union[str, tuple[str, Any], ArrayLike],
     length: int,
     fftbins: bool = True,
 ) -> NDArray[np.floating]:
@@ -174,7 +174,7 @@ def window_bandwidth(
 def stft(
     x: ArrayLike,
     fs: float = 1.0,
-    window: Union[str, tuple, ArrayLike] = "hann",
+    window: Union[str, tuple[str, Any], ArrayLike] = "hann",
     nperseg: int = 256,
     noverlap: Optional[int] = None,
     nfft: Optional[int] = None,
@@ -264,13 +264,13 @@ def stft(
 def istft(
     Zxx: ArrayLike,
     fs: float = 1.0,
-    window: Union[str, tuple, ArrayLike] = "hann",
+    window: Union[str, tuple[str, Any], ArrayLike] = "hann",
     nperseg: Optional[int] = None,
     noverlap: Optional[int] = None,
     nfft: Optional[int] = None,
     input_onesided: bool = True,
     boundary: bool = True,
-) -> tuple:
+) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
     """
     Compute the inverse Short-Time Fourier Transform.
 
@@ -351,7 +351,7 @@ def istft(
 def spectrogram(
     x: ArrayLike,
     fs: float = 1.0,
-    window: Union[str, tuple, ArrayLike] = "hann",
+    window: Union[str, tuple[str, Any], ArrayLike] = "hann",
     nperseg: int = 256,
     noverlap: Optional[int] = None,
     nfft: Optional[int] = None,
@@ -436,11 +436,11 @@ def spectrogram(
 def reassigned_spectrogram(
     x: ArrayLike,
     fs: float = 1.0,
-    window: Union[str, tuple, ArrayLike] = "hann",
+    window: Union[str, tuple[str, Any], ArrayLike] = "hann",
     nperseg: int = 256,
     noverlap: Optional[int] = None,
     nfft: Optional[int] = None,
-) -> tuple:
+) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]:
     """
     Compute reassigned spectrogram for improved time-frequency resolution.
 
@@ -538,7 +538,7 @@ def mel_spectrogram(
     window: str = "hann",
     nperseg: int = 2048,
     noverlap: Optional[int] = None,
-) -> tuple:
+) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]:
     """
     Compute mel-scaled spectrogram.
 
@@ -611,15 +611,15 @@ def mel_spectrogram(
     # Mel frequency centers
     mel_freqs = _mel_frequencies(n_mels, fmin, fmax)
 
-    return mel_freqs, spec_result.times, mel_spec
+    return (mel_freqs, spec_result.times, mel_spec)
 
 
-def _hz_to_mel(hz: Union[float, ArrayLike]) -> Union[float, NDArray]:
+def _hz_to_mel(hz: Union[float, ArrayLike]) -> Union[float, NDArray[np.floating]]:
     """Convert frequency in Hz to mel scale."""
     return 2595.0 * np.log10(1.0 + np.asarray(hz) / 700.0)
 
 
-def _mel_to_hz(mel: Union[float, ArrayLike]) -> Union[float, NDArray]:
+def _mel_to_hz(mel: Union[float, ArrayLike]) -> Union[float, NDArray[np.floating]]:
     """Convert mel scale to frequency in Hz."""
     return 700.0 * (10.0 ** (np.asarray(mel) / 2595.0) - 1.0)
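The `window` parameter's new `tuple[str, Any]` form matches the familiar `(name, parameter)` window convention (for example `("kaiser", 8.0)`), and the `_hz_to_mel` / `_mel_to_hz` pair uses the standard 2595 * log10(1 + f/700) mel mapping. A small plain-NumPy sanity check of that mapping, independent of pytcl:

```python
import numpy as np

def hz_to_mel(hz):
    # Same formula as _hz_to_mel in the hunk above
    return 2595.0 * np.log10(1.0 + np.asarray(hz) / 700.0)

def mel_to_hz(mel):
    # Exact inverse, as in _mel_to_hz
    return 700.0 * (10.0 ** (np.asarray(mel) / 2595.0) - 1.0)

freqs = np.array([0.0, 440.0, 1000.0, 8000.0])
assert np.allclose(mel_to_hz(hz_to_mel(freqs)), freqs)  # the two maps invert each other
print(hz_to_mel(1000.0))  # ~1000 mel: 1 kHz maps to roughly 1000 mel by construction
```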
pytcl/mathematical_functions/transforms/wavelets.py
CHANGED

@@ -20,7 +20,7 @@ References
 .. [2] Daubechies, I. (1992). Ten Lectures on Wavelets. SIAM.
 """
 
-from typing import Callable, List, NamedTuple, Optional, Union
+from typing import Any, Callable, List, NamedTuple, Optional, Union
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -262,7 +262,7 @@ def gaussian_wavelet(
 def cwt(
     signal: ArrayLike,
     scales: ArrayLike,
-    wavelet: Union[str, Callable[[int], NDArray]] = "morlet",
+    wavelet: Union[str, Callable[[int], NDArray[np.floating]]] = "morlet",
     fs: float = 1.0,
     method: str = "fft",
 ) -> CWTResult:
@@ -312,16 +312,16 @@ def cwt(
     n = len(signal)
 
     # Determine wavelet function
-    def _morlet_default(M: int) -> NDArray:
+    def _morlet_default(M: int) -> NDArray[np.floating]:
         return morlet_wavelet(M, w=5.0)
 
-    def _ricker_default(M: int) -> NDArray:
+    def _ricker_default(M: int) -> NDArray[np.floating]:
         return ricker_wavelet(M, a=1.0)
 
-    def _gaussian1_default(M: int) -> NDArray:
+    def _gaussian1_default(M: int) -> NDArray[np.floating]:
         return gaussian_wavelet(M, order=1)
 
-    def _gaussian2_default(M: int) -> NDArray:
+    def _gaussian2_default(M: int) -> NDArray[np.floating]:
         return gaussian_wavelet(M, order=2)
 
     if callable(wavelet):
@@ -596,7 +596,7 @@ def dwt_single_level(
     signal: ArrayLike,
     wavelet: str = "db4",
     mode: str = "symmetric",
-) -> tuple:
+) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
     """
     Compute single-level DWT decomposition.
 
@@ -673,7 +673,7 @@ def wpt(
     wavelet: str = "db4",
     level: Optional[int] = None,
     mode: str = "symmetric",
-) -> dict:
+) -> dict[str, NDArray[np.floating]]:
     """
     Compute the Wavelet Packet Transform.
 
@@ -748,7 +748,7 @@ def available_wavelets() -> List[str]:
     return pywt.wavelist()
 
 
-def wavelet_info(wavelet: str) -> dict:
+def wavelet_info(wavelet: str) -> dict[str, Any]:
     """
     Get information about a wavelet.
 
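With the tightened `cwt` signature above, a custom wavelet is simply a callable that maps a sample count to a float array. A hypothetical callable satisfying `Callable[[int], NDArray[np.floating]]`: a plain-NumPy Ricker (Mexican hat) sketch rather than pytcl's own `ricker_wavelet`:

```python
import numpy as np
from numpy.typing import NDArray

def my_ricker(M: int, a: float = 4.0) -> NDArray[np.floating]:
    """Ricker (Mexican hat) wavelet sampled at M points, standard normalization."""
    t = np.arange(M) - (M - 1) / 2.0
    amp = 2.0 / (np.sqrt(3.0 * a) * np.pi**0.25)
    return amp * (1.0 - (t / a) ** 2) * np.exp(-(t**2) / (2.0 * a**2))

w = my_ricker(101)
print(w.shape, w.dtype)  # (101,) float64, i.e. a valid Callable[[int], NDArray[np.floating]]
# Usage would then look like cwt(signal, scales, wavelet=my_ricker), per the signature above.
```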
pytcl/navigation/geodesy.py
CHANGED
@@ -10,7 +10,7 @@ This module provides geodetic utilities including:
 
 import logging
 from functools import lru_cache
-from typing import NamedTuple, Tuple
+from typing import Any, NamedTuple, Tuple
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -715,12 +715,12 @@ def clear_geodesy_cache() -> None:
     _logger.debug("Geodesy caches cleared")
 
 
-def get_geodesy_cache_info() -> dict:
+def get_geodesy_cache_info() -> dict[str, Any]:
     """Get cache statistics for geodesy computations.
 
     Returns
     -------
-    dict
+    dict[str, Any]
         Dictionary with cache statistics for inverse and direct geodetic caches.
     """
     return {
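The widened `get_geodesy_cache_info() -> dict[str, Any]` return suggests a report assembled from `functools.lru_cache` statistics. A minimal sketch of that pattern with an illustrative cached helper and key names (the actual keys pytcl returns may differ):

```python
from functools import lru_cache
from typing import Any

@lru_cache(maxsize=1024)
def _inverse_geodetic_cached(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    return 0.0  # placeholder for the real geodesic solve

def get_cache_report() -> dict[str, Any]:
    info = _inverse_geodetic_cached.cache_info()  # named tuple: hits, misses, maxsize, currsize
    total = info.hits + info.misses
    return {"inverse": info._asdict(), "inverse_hit_rate": info.hits / total if total else 0.0}

_inverse_geodetic_cached(0.0, 0.0, 1.0, 1.0)
print(get_cache_report())  # {'inverse': {'hits': 0, 'misses': 1, ...}, 'inverse_hit_rate': 0.0}
```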
pytcl/navigation/great_circle.py
CHANGED
@@ -12,7 +12,7 @@ computing the shortest path on a sphere, including:
 
 import logging
 from functools import lru_cache
-from typing import NamedTuple, Optional, Tuple
+from typing import Any, NamedTuple, Optional, Tuple
 
 import numpy as np
 from numpy.typing import NDArray
@@ -505,7 +505,7 @@ def great_circle_intersect(
     """
 
     # Convert to Cartesian unit vectors
-    def to_cartesian(lat, lon):
+    def to_cartesian(lat: Any, lon: Any) -> NDArray[np.float64]:
         return np.array(
             [np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)]
         )
@@ -661,7 +661,7 @@ def great_circle_tdoa_loc(
     delta_d12 = delta_r12 / radius
     delta_d13 = delta_r13 / radius
 
-    def objective(lat, lon):
+    def objective(lat: Any, lon: Any) -> Any:
         """Objective function: difference between computed and observed TDOAs."""
         d1 = great_circle_distance(lat, lon, lat1, lon1, radius=1.0)
         d2 = great_circle_distance(lat, lon, lat2, lon2, radius=1.0)
@@ -836,12 +836,12 @@ def clear_great_circle_cache() -> None:
     _logger.debug("Great circle caches cleared")
 
 
-def get_cache_info() -> dict:
+def get_cache_info() -> dict[str, Any]:
     """Get cache statistics for great circle computations.
 
     Returns
     -------
-    dict
+    dict[str, Any]
         Dictionary with cache statistics for distance and azimuth caches.
     """
     return {
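The `to_cartesian` helper above maps (lat, lon) in radians to a unit vector, and the central angle between two points is then the arccosine of the dot product of their unit vectors. A self-contained check of that identity:

```python
import numpy as np

def to_cartesian(lat: float, lon: float) -> np.ndarray:
    # Same mapping as the helper in the hunk above (angles in radians)
    return np.array([np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)])

lat1, lon1 = np.radians(51.5), np.radians(-0.13)   # roughly London
lat2, lon2 = np.radians(40.7), np.radians(-74.0)   # roughly New York

cos_angle = np.dot(to_cartesian(lat1, lon1), to_cartesian(lat2, lon2))
central_angle = np.arccos(np.clip(cos_angle, -1.0, 1.0))
print(central_angle * 6371.0)  # ~5570 km on a 6371 km mean-radius sphere
```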
pytcl/plotting/coordinates.py
CHANGED
@@ -5,10 +5,10 @@ This module provides functions for visualizing coordinate systems,
 rotations, and transformations in 2D and 3D.
 """
 
-from typing import List, Optional, Tuple
+from typing import Any, List, Optional, Tuple
 
 import numpy as np
-from numpy.typing import ArrayLike
+from numpy.typing import ArrayLike, NDArray
 
 try:
     import plotly.graph_objects as go
@@ -180,17 +180,17 @@ def plot_euler_angles(
     angles = np.asarray(angles)
 
     # Create rotation matrices for each axis
-    def rotx(a):
+    def rotx(a: Any) -> NDArray[np.float64]:
         return np.array(
             [[1, 0, 0], [0, np.cos(a), -np.sin(a)], [0, np.sin(a), np.cos(a)]]
         )
 
-    def roty(a):
+    def roty(a: Any) -> NDArray[np.float64]:
         return np.array(
             [[np.cos(a), 0, np.sin(a)], [0, 1, 0], [-np.sin(a), 0, np.cos(a)]]
         )
 
-    def rotz(a):
+    def rotz(a: Any) -> NDArray[np.float64]:
         return np.array(
             [[np.cos(a), -np.sin(a), 0], [np.sin(a), np.cos(a), 0], [0, 0, 1]]
         )
@@ -293,7 +293,7 @@ def plot_quaternion_interpolation(
     q_end = q_end / np.linalg.norm(q_end)
 
     # SLERP interpolation
-    def quat_slerp(q1, q2, t):
+    def quat_slerp(q1: Any, q2: Any, t: Any) -> NDArray[np.float64]:
         dot = np.dot(q1, q2)
         if dot < 0:
             q2 = -q2
@@ -303,7 +303,7 @@ def plot_quaternion_interpolation(
         theta = np.arccos(dot)
         return (np.sin((1 - t) * theta) * q1 + np.sin(t * theta) * q2) / np.sin(theta)
 
-    def quat_to_rotmat(q):
+    def quat_to_rotmat(q: Any) -> NDArray[np.float64]:
         w, x, y, z = q
         return np.array(
             [
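The `rotx`/`roty`/`rotz` helpers above are the standard right-handed single-axis rotation matrices. A quick standalone check that a 90 degree rotation about z maps the x axis onto the y axis and that the matrix is a proper rotation:

```python
import numpy as np

def rotz(a: float) -> np.ndarray:
    # Same matrix as the rotz helper in the hunk above
    return np.array([[np.cos(a), -np.sin(a), 0], [np.sin(a), np.cos(a), 0], [0, 0, 1]])

R = rotz(np.pi / 2)
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0], atol=1e-12)  # x -> y
assert np.allclose(R @ R.T, np.eye(3), atol=1e-12)                              # orthogonal
assert np.isclose(np.linalg.det(R), 1.0)                                        # determinant +1
```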
pytcl/plotting/tracks.py
CHANGED
@@ -196,7 +196,7 @@ def plot_tracking_result(
     covariances: Optional[List[ArrayLike]] = None,
     x_idx: int = 0,
     y_idx: int = 2,
-    cov_xy_idx: tuple = (0, 2),
+    cov_xy_idx: tuple[int, int] = (0, 2),
     ellipse_interval: int = 5,
     n_std: float = 2.0,
     title: str = "Tracking Result",
@@ -596,7 +596,7 @@ def create_animated_tracking(
     covariances: Optional[List[ArrayLike]] = None,
     x_idx: int = 0,
     y_idx: int = 2,
-    cov_xy_idx: tuple = (0, 2),
+    cov_xy_idx: tuple[int, int] = (0, 2),
     n_std: float = 2.0,
     frame_duration: int = 100,
     title: str = "Animated Tracking",
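The `cov_xy_idx: tuple[int, int]` pair picks the rows and columns of the state covariance that form the 2x2 position block used for the error ellipses. Assuming a `[x, vx, y, vy]` state ordering (which is what the defaults `x_idx=0`, `y_idx=2`, `cov_xy_idx=(0, 2)` suggest), the extraction can be sketched as:

```python
import numpy as np

P = np.diag([4.0, 1.0, 9.0, 1.0])  # toy covariance for a hypothetical [x, vx, y, vy] state
cov_xy_idx = (0, 2)                # the default shown in the diff
n_std = 2.0

i, j = cov_xy_idx
P_xy = P[np.ix_([i, j], [i, j])]                  # 2x2 position covariance block
axes = n_std * np.sqrt(np.linalg.eigvalsh(P_xy))  # ellipse semi-axes at n_std sigmas
print(P_xy)   # [[4. 0.], [0. 9.]]
print(axes)   # [4. 6.]
```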
pytcl/static_estimation/maximum_likelihood.py
CHANGED

@@ -12,7 +12,7 @@ References
    Wiley, 2001.
 """
 
-from typing import Callable, NamedTuple, Optional
+from typing import Any, Callable, NamedTuple, Optional
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -75,10 +75,10 @@ class CRBResult(NamedTuple):
 
 
 def fisher_information_numerical(
-    log_likelihood: Callable[[
+    log_likelihood: Callable[[np.ndarray[Any, Any]], float],
     theta: ArrayLike,
     h: float = 1e-5,
-) ->
+) -> np.ndarray[Any, Any]:
     """
     Compute Fisher information matrix numerically.
 
@@ -189,11 +189,13 @@ def fisher_information_gaussian(
 
 
 def fisher_information_exponential_family(
-    sufficient_stats: Callable[
+    sufficient_stats: Callable[
+        [np.ndarray[Any, Any], np.ndarray[Any, Any]], np.ndarray[Any, Any]
+    ],
     theta: ArrayLike,
     data: ArrayLike,
     h: float = 1e-5,
-) ->
+) -> np.ndarray[Any, Any]:
     """
     Fisher information for exponential family distributions.
 
@@ -232,10 +234,10 @@ def fisher_information_exponential_family(
 
 
 def observed_fisher_information(
-    log_likelihood: Callable[[
+    log_likelihood: Callable[[np.ndarray[Any, Any]], float],
     theta: ArrayLike,
     h: float = 1e-5,
-) ->
+) -> np.ndarray[Any, Any]:
     """
     Compute observed Fisher information (negative Hessian).
 
@@ -409,10 +411,10 @@ def efficiency(
 
 
 def mle_newton_raphson(
-    log_likelihood: Callable[[
-    score: Callable[[
+    log_likelihood: Callable[[np.ndarray[Any, Any]], float],
+    score: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
     theta_init: ArrayLike,
-    hessian: Optional[Callable[[
+    hessian: Optional[Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]]] = None,
     max_iter: int = 100,
     tol: float = 1e-8,
     h: float = 1e-5,
@@ -460,7 +462,7 @@ def mle_newton_raphson(
 
     converged = False
 
-    def numerical_hessian(t:
+    def numerical_hessian(t: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
         H = np.zeros((n_params, n_params))
         for i in range(n_params):
             for j in range(i, n_params):
@@ -532,9 +534,9 @@ def mle_newton_raphson(
 
 
 def mle_scoring(
-    log_likelihood: Callable[[
-    score: Callable[[
-    fisher_info: Callable[[
+    log_likelihood: Callable[[np.ndarray[Any, Any]], float],
+    score: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
+    fisher_info: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
     theta_init: ArrayLike,
     max_iter: int = 100,
     tol: float = 1e-8,
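The `Callable[[np.ndarray[Any, Any]], float]` log-likelihood annotation above is exactly the shape a finite-difference Fisher or Hessian routine needs: a function of the parameter vector returning a scalar. A standalone sketch of an observed-information (negative Hessian) estimate, checked against the analytic value n/sigma^2 for a Gaussian mean with known variance:

```python
import numpy as np

rng = np.random.default_rng(0)
sigma = 2.0
data = rng.normal(loc=1.5, scale=sigma, size=500)

def log_likelihood(theta: np.ndarray) -> float:
    # Gaussian log-likelihood in the mean, variance known (constants dropped)
    return float(-0.5 * np.sum((data - theta[0]) ** 2) / sigma**2)

def observed_information(ll, theta: np.ndarray, h: float = 1e-4) -> np.ndarray:
    # Central-difference negative second derivative; 1-parameter case for brevity
    e = np.array([h])
    d2 = (ll(theta + e) - 2.0 * ll(theta) + ll(theta - e)) / h**2
    return -np.array([[d2]])

print(observed_information(log_likelihood, np.array([1.5])))  # ~[[125.]] = n / sigma**2
```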
pytcl/static_estimation/robust.py
CHANGED

@@ -12,7 +12,7 @@ References
    Cartography," Communications of the ACM, 1981.
 """
 
-from typing import Callable, NamedTuple, Optional
+from typing import Any, Callable, NamedTuple, Optional
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -313,8 +313,8 @@ def tau_scale(
 def irls(
     A: ArrayLike,
     b: ArrayLike,
-    weight_func: Callable[[
-    scale_func: Callable[[
+    weight_func: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]] = huber_weight,
+    scale_func: Callable[[np.ndarray[Any, Any]], float] = mad,
     max_iter: int = 50,
     tol: float = 1e-6,
 ) -> RobustResult:
@@ -455,7 +455,7 @@ def huber_regression(
     Gaussian errors and resistance to outliers.
     """
 
-    def weight_func(r:
+    def weight_func(r: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
         return huber_weight(r, c)
 
     return irls(A, b, weight_func=weight_func, max_iter=max_iter, tol=tol)
@@ -504,7 +504,7 @@ def tukey_regression(
     Huber for gross outliers.
     """
 
-    def weight_func(r:
+    def weight_func(r: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
         return tukey_weight(r, c)
 
     return irls(A, b, weight_func=weight_func, max_iter=max_iter, tol=tol)
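The `weight_func` and `scale_func` callables above are the two ingredients of IRLS: a per-residual weight and a robust scale estimate. A compact standalone sketch with a Huber-style weight and MAD scale (illustrative constants and helper names; pytcl's own `huber_weight` and `mad` may differ in detail):

```python
import numpy as np

def huber_weight(r: np.ndarray, c: float = 1.345) -> np.ndarray:
    a = np.abs(r)
    return np.where(a <= c, 1.0, c / np.maximum(a, 1e-12))  # psi(r)/r for the Huber loss

def mad(r: np.ndarray) -> float:
    return float(np.median(np.abs(r - np.median(r))) / 0.6745)  # Gaussian-consistent scale

rng = np.random.default_rng(1)
A = np.column_stack([np.ones(50), np.linspace(0.0, 1.0, 50)])
b = A @ np.array([1.0, 2.0]) + 0.05 * rng.normal(size=50)
b[::10] += 5.0                                   # inject gross outliers

x = np.linalg.lstsq(A, b, rcond=None)[0]         # ordinary least-squares start
for _ in range(20):                              # IRLS: reweight, then weighted LS
    r = b - A @ x
    w = huber_weight(r / mad(r))
    sw = np.sqrt(w)
    x = np.linalg.lstsq(sw[:, None] * A, sw * b, rcond=None)[0]
print(x)                                         # close to [1, 2] despite the outliers
```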
pytcl/terrain/loaders.py
CHANGED
@@ -21,7 +21,7 @@ References
 import os
 from functools import lru_cache
 from pathlib import Path
-from typing import
+from typing import Any, NamedTuple, Optional
 
 import numpy as np
 from numpy.typing import NDArray
@@ -31,7 +31,7 @@ from .dem import DEMGrid
 # Model parameters
 _GEBCO_BASE_URL = "https://www.gebco.net/data-products/gridded-bathymetry-data"
 
-GEBCO_PARAMETERS:
+GEBCO_PARAMETERS: dict[str, dict[str, Any]] = {
     "GEBCO2024": {
         "resolution_arcsec": 15.0,
         "n_lat": 43200,
@@ -58,7 +58,7 @@ GEBCO_PARAMETERS: Dict[str, Dict] = {
     },
 }
 
-EARTH2014_PARAMETERS:
+EARTH2014_PARAMETERS: dict[str, dict[str, Any]] = {
     "SUR": {
         "description": "Physical surface (topography, ice surface, 0 over oceans)",
         "file_pattern": "Earth2014.SUR2014.1min.geod.bin",
@@ -275,7 +275,7 @@ def parse_gebco_netcdf(
     lat_max: Optional[float] = None,
     lon_min: Optional[float] = None,
     lon_max: Optional[float] = None,
-) ->
+) -> tuple[NDArray[np.floating], float, float, float, float]:
     """Parse GEBCO NetCDF file and extract region.
 
     Parameters
@@ -369,7 +369,7 @@ def parse_earth2014_binary(
     lat_max: Optional[float] = None,
     lon_min: Optional[float] = None,
     lon_max: Optional[float] = None,
-) ->
+) -> tuple[NDArray[np.floating], float, float, float, float]:
     """Parse Earth2014 binary file and extract region.
 
     Earth2014 files are stored as int16 big-endian binary data,
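The docstring above notes that Earth2014 grids are stored as int16, big-endian binary. A hypothetical NumPy reader for such a file (the function name, the 1-arc-minute 10800 x 21600 grid shape, and the row orientation are assumptions; pytcl's actual `parse_earth2014_binary` may lay the data out differently):

```python
import numpy as np

def read_earth2014_grid(path: str, n_lat: int = 10800, n_lon: int = 21600) -> np.ndarray:
    # ">i2" = big-endian signed 16-bit integers, per the docstring above
    raw = np.fromfile(path, dtype=">i2")
    if raw.size != n_lat * n_lon:
        raise ValueError(f"expected {n_lat * n_lon} samples, got {raw.size}")
    # Heights in metres; cast to native-endian float for downstream maths
    return raw.reshape(n_lat, n_lon).astype(np.float32)
```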
pytcl/trackers/hypothesis.py
CHANGED
pytcl/trackers/mht.py
CHANGED
@@ -15,7 +15,7 @@ References
    IEEE Trans. Automatic Control, 1979.
 """
 
-from typing import Callable, Dict, List, NamedTuple, Optional
+from typing import Callable, Dict, List, NamedTuple, Optional
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -322,8 +322,8 @@ class MHTTracker:
     def _predict_tracks(
         self,
         tracks: Dict[int, MHTTrack],
-        F: NDArray,
-        Q: NDArray,
+        F: NDArray[np.floating],
+        Q: NDArray[np.floating],
     ) -> Dict[int, MHTTrack]:
         """Predict all tracks forward in time."""
         predicted = {}
@@ -352,8 +352,8 @@ class MHTTracker:
     def _compute_gating_and_likelihoods(
         self,
         tracks: Dict[int, MHTTrack],
-        Z: NDArray,
-    ) -> tuple[
+        Z: NDArray[np.floating],
+    ) -> tuple[set[tuple[int, int]], dict[tuple[int, int], float]]:
         """Compute gating matrix and likelihood values."""
         gated = set()
         likelihood_matrix = {}
@@ -387,8 +387,8 @@ class MHTTracker:
         self,
         association: Dict[int, int],
         tracks: Dict[int, MHTTrack],
-        Z: NDArray,
-        likelihood_matrix:
+        Z: NDArray[np.floating],
+        likelihood_matrix: dict[tuple[int, int], float],
     ) -> float:
         """Compute likelihood of a joint association."""
         likelihood = 1.0
@@ -417,7 +417,7 @@ class MHTTracker:
     def _update_track(
         self,
         track: MHTTrack,
-        measurement: NDArray,
+        measurement: NDArray[np.floating],
         meas_idx: int,
     ) -> MHTTrack:
         """Update a track with a measurement."""
@@ -502,7 +502,7 @@ class MHTTracker:
 
     def _initiate_track(
         self,
-        measurement: NDArray,
+        measurement: NDArray[np.floating],
         meas_idx: int,
     ) -> MHTTrack:
         """Initiate a new track from a measurement."""
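The new return annotation on `_compute_gating_and_likelihoods` spells out the two structures it produces: a set of gated `(track_id, measurement_idx)` pairs and a dict of per-pair likelihoods. A standalone sketch of how such structures are typically filled, using Mahalanobis gating and a Gaussian measurement likelihood (illustrative only, not pytcl's internals):

```python
import numpy as np

def gate_and_score(tracks, Z, gate_threshold: float = 9.21):
    """tracks: {track_id: (z_pred (m,), S (m, m))}; Z: (n_meas, m) measurements."""
    gated: set[tuple[int, int]] = set()
    likelihood_matrix: dict[tuple[int, int], float] = {}
    for tid, (z_pred, S) in tracks.items():
        S_inv = np.linalg.inv(S)
        norm = 1.0 / np.sqrt(np.linalg.det(2.0 * np.pi * S))
        for j, z in enumerate(Z):
            nu = z - z_pred                 # innovation
            d2 = float(nu @ S_inv @ nu)     # squared Mahalanobis distance
            if d2 < gate_threshold:         # ~99% gate for 2-D measurements
                gated.add((tid, j))
                likelihood_matrix[(tid, j)] = float(norm * np.exp(-0.5 * d2))
    return gated, likelihood_matrix

tracks = {0: (np.array([0.0, 0.0]), np.eye(2)), 1: (np.array([10.0, 0.0]), np.eye(2))}
Z = np.array([[0.5, -0.2], [9.6, 0.3], [50.0, 50.0]])
gated, likes = gate_and_score(tracks, Z)
print(sorted(gated))  # [(0, 0), (1, 1)]; the far measurement falls outside every gate
```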
pytcl/trackers/multi_target.py
CHANGED
@@ -225,7 +225,7 @@ class MultiTargetTracker:
         track.covariance = F @ track.covariance @ F.T + Q
         track.time = self._time
 
-    def _associate(self, Z: NDArray[np.float64]) -> dict:
+    def _associate(self, Z: NDArray[np.float64]) -> dict[int, int]:
         """
         Associate measurements to tracks using GNN.
 
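The `dict[int, int]` return on `_associate` maps a track index to the measurement index assigned to it. A minimal GNN sketch using `scipy.optimize.linear_sum_assignment` on a Euclidean cost matrix (illustrative; the real `_associate` presumably gates and costs with the innovation covariance):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def associate_gnn(track_pos: np.ndarray, Z: np.ndarray, max_dist: float = 5.0) -> dict[int, int]:
    # Cost matrix: distance from every predicted track position to every measurement
    cost = np.linalg.norm(track_pos[:, None, :] - Z[None, :, :], axis=-1)
    rows, cols = linear_sum_assignment(cost)       # globally optimal one-to-one assignment
    return {int(t): int(m) for t, m in zip(rows, cols) if cost[t, m] <= max_dist}

tracks = np.array([[0.0, 0.0], [10.0, 10.0]])
Z = np.array([[0.4, -0.3], [30.0, 30.0], [9.5, 10.2]])
print(associate_gnn(tracks, Z))  # {0: 0, 1: 2}; the stray measurement is left unassigned
```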
|
File without changes
|
|
File without changes
|
|
File without changes
|