nrl-tracker 0.21.1__py3-none-any.whl → 0.21.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: nrl-tracker
- Version: 0.21.1
+ Version: 0.21.3
  Summary: Python port of the U.S. Naval Research Laboratory's Tracker Component Library for target tracking algorithms
  Author: Original: David F. Crouse, Naval Research Laboratory
  Maintainer: Python Port Contributors
@@ -60,7 +60,7 @@ Requires-Dist: plotly>=5.15.0; extra == "visualization"

  # Tracker Component Library (Python)

- [![PyPI version](https://img.shields.io/badge/pypi-v0.21.1-blue.svg)](https://pypi.org/project/nrl-tracker/)
+ [![PyPI version](https://img.shields.io/badge/pypi-v0.21.2-blue.svg)](https://pypi.org/project/nrl-tracker/)
  [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
  [![License: Public Domain](https://img.shields.io/badge/License-Public%20Domain-brightgreen.svg)](https://en.wikipedia.org/wiki/Public_domain)
  [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
@@ -1,12 +1,12 @@
- pytcl/__init__.py,sha256=1grcwT7hqkF4MBmoUc9ugi2bhsecHLZuLZr8ALWSgC8,1894
+ pytcl/__init__.py,sha256=1ZCMTDp36EaKd4auK6bteUEONZBTOFp8IczTREadasM,1894
  pytcl/assignment_algorithms/__init__.py,sha256=f9V-TkEVmiKYYyth4PTpDfJvA7yYV_ys6Zix-QwWIYY,2136
  pytcl/assignment_algorithms/data_association.py,sha256=X6Kww9-J2WLxU1790w7dRvAFdM90RSXHvs7IF26zweQ,11427
  pytcl/assignment_algorithms/gating.py,sha256=fN_oAOkv7nYYOWE1oPOLrcCn3xEpKdMVlFSbRMAURxY,10815
- pytcl/assignment_algorithms/jpda.py,sha256=hvZGkSrStuw9iQg7gPuIV7_bOinX3Xix5vLrVLMnEMM,19288
+ pytcl/assignment_algorithms/jpda.py,sha256=Hv55j3J9qVwzlUfWdXdSasodTyB1ZKdgEpo5dBh95O8,19582
  pytcl/assignment_algorithms/three_dimensional/__init__.py,sha256=1Q40OUlUQoo7YKEucwdrSNo3D4A0Zibvkr8z4TpueBg,526
  pytcl/assignment_algorithms/three_dimensional/assignment.py,sha256=9BJhwlYu3JJ0kZ9sRyKKfpdvQdL4WYYHCtLbvaWycBw,19212
  pytcl/assignment_algorithms/two_dimensional/__init__.py,sha256=4Evsn__9hTfI2i8m8Ngl-Zy0Fa2OydKmDKlZlH6jaao,778
- pytcl/assignment_algorithms/two_dimensional/assignment.py,sha256=IRwbv-82dEbbm3KwjyUjuUWM3wP9s6oUtzX2mZffMjA,11419
+ pytcl/assignment_algorithms/two_dimensional/assignment.py,sha256=eh87MBb-uiUSI1MXj4HrreRKB6Z8rxAyDkNQ8-u4SbM,11848
  pytcl/assignment_algorithms/two_dimensional/kbest.py,sha256=yiTToLuP7xWxQlQ8E-fpgXg-5iu0nnXcJXStjUB0nOE,17284
  pytcl/astronomical/__init__.py,sha256=SKELDaDhxpvCo1dMswBQYOQr_Th3ShuzZTzxZMdhE-U,5650
  pytcl/astronomical/lambert.py,sha256=Lc8FT1JmpI9WSXsG2s5vIRkSoBSV7r5hd3o2bGh2Ojo,15607
@@ -16,10 +16,10 @@ pytcl/astronomical/time_systems.py,sha256=Jg0Zaq60hc4Ts1aQtb5bK4KSZhz-uQse8gYC89
  pytcl/atmosphere/__init__.py,sha256=QAYgJYzgs0kreRV8fByii4p477LCxBDfrXB_cL7SYkM,706
  pytcl/atmosphere/models.py,sha256=pMLv8D7qoFqLZrlbTHLJJULOdDdhPskJ1m7KVKLV63E,9584
  pytcl/clustering/__init__.py,sha256=bYdhC_XJEt6KUUni9bIPxaddXNEGmIJQvGkA14rK4J8,1697
- pytcl/clustering/dbscan.py,sha256=cjBi5b0gcBgFqUtkLm1G0JMJLQYALahyYuoSI4G0zAY,6903
+ pytcl/clustering/dbscan.py,sha256=PS6QlOwHFerbZNEb3zcNhN4oNQpgOOw5y0WskQzyKIo,7364
  pytcl/clustering/gaussian_mixture.py,sha256=U5U0Z46tZWdTLNdNNNJenoeviwZRAOvexVFYVLt4QMc,22865
- pytcl/clustering/hierarchical.py,sha256=PEkIGqAQ33InUGIcCRevv0rko4R1HqIwfejNhHsbnKQ,13794
- pytcl/clustering/kmeans.py,sha256=fmUIQdQLKCocwmMabdReQ0cKqMNKNFI_DyTCivPmAtw,10842
+ pytcl/clustering/hierarchical.py,sha256=Hw9BFCn5df_ATpJX63R3B31MHz27ztCw9ihMDIlI688,14202
+ pytcl/clustering/kmeans.py,sha256=250FQyDol5S_Y4TznNn9cEuE96UDp7wvEkPZJ1DLul8,10697
  pytcl/containers/__init__.py,sha256=t8oRtusBrh6G2dEk2PcofmxrpLPQ9nOJwH19GyKnNcc,1699
  pytcl/containers/cluster_set.py,sha256=_lZ39PNHTL7fUEZAwBF2ICK6v0GjZKpeUOg0knEdPzo,22760
  pytcl/containers/covertree.py,sha256=1JWqXxoUFLxuMnjwj2qf0iz2uPzdujQYdwJW3l5qsOs,13282
@@ -43,18 +43,18 @@ pytcl/core/array_utils.py,sha256=SsgEiAoRCWxAVKq1aa5-nPdOi-2AB6XNObu0IaGClUk,139
  pytcl/core/constants.py,sha256=lZVDK5zsSR02_4b2Nqx9KDtZT9QaYhkZ9wuoODbifd4,8693
  pytcl/core/validation.py,sha256=WRlzMlUihtqc3XZoWOTFK0sBAZVDIwTMGCiWcX5OZVY,13093
  pytcl/dynamic_estimation/__init__.py,sha256=jA5FF6kHYklY5LMOfZaKcCeiPTpVe8vHIMp3ECDOmsc,4582
- pytcl/dynamic_estimation/imm.py,sha256=CoOwJJv0DMrNKRkP-OqWbSxZO_-GAgNLaQ4KiBhjvEg,21313
+ pytcl/dynamic_estimation/imm.py,sha256=t4dlutWeCLtAMl-ylNbxMUC3gbRBF_sXI3bX4PAk-OQ,22080
  pytcl/dynamic_estimation/information_filter.py,sha256=x7iQwO_iJT1dCSvDws5LqD3yAtjw9QVGUfMPcXn1IA4,17349
  pytcl/dynamic_estimation/smoothers.py,sha256=qC_g0YG0U4L_7rGBpcZzYcb11A--Hc8tb0fxWXIJdxM,18931
  pytcl/dynamic_estimation/batch_estimation/__init__.py,sha256=JQ0s76Enov5a7plA4EnUua4t-7etikQrwr5z4WIjUeo,46
  pytcl/dynamic_estimation/kalman/__init__.py,sha256=yoFLj0n-NRkdZnRVL-BkHBlATk8pfZEVlsY3BhSYgKc,2387
  pytcl/dynamic_estimation/kalman/extended.py,sha256=_deQTnUGOp_BlhP-FDEY0LOjgUMN32FQn0V12unCM4A,10397
- pytcl/dynamic_estimation/kalman/linear.py,sha256=sTfWt_yDxyQCA0SMjOx4xfhVnq3nReOVUObXuUuZRv8,11844
+ pytcl/dynamic_estimation/kalman/linear.py,sha256=1Zgg9gZya0Vxs9im7sPUqLj0Luo463vS-RSa6GCReFI,12248
  pytcl/dynamic_estimation/kalman/square_root.py,sha256=Hw1F4_Zc7IA6Mt1WCkjx1UuLAUmNhM5vPLvueb7oRSA,26931
- pytcl/dynamic_estimation/kalman/unscented.py,sha256=NXPBTAf6814Yw5HTFpX21w_Y0eSC6q1gZSRUHHYEQjI,14943
+ pytcl/dynamic_estimation/kalman/unscented.py,sha256=VmYE8LuM1nWpFTmD39iXdEZ3m41IsurCCbXJ19-ERFs,15440
  pytcl/dynamic_estimation/measurement_update/__init__.py,sha256=8rlyJwVpxf0fZj-AFo1hlewvryZRhUzcy3F8uMe6I8c,48
  pytcl/dynamic_estimation/particle_filters/__init__.py,sha256=-DRF5rVF2749suLlArmkTvVkqeMcV_mIx0eLeTj6wNU,906
- pytcl/dynamic_estimation/particle_filters/bootstrap.py,sha256=EfF9w0xqyBIYOHIwoYRt7D8aCgdPoGpvaFINu8gCZ80,12396
+ pytcl/dynamic_estimation/particle_filters/bootstrap.py,sha256=FcF4W_NM5ZqJnw5fq4rq6fLY9X1r4uFJOiAX9a-NGG8,13371
  pytcl/dynamic_models/__init__.py,sha256=Cd8MyyYuB8gMnepkPA-HSwTaKFPThnqoKOhdjVOsXWg,2783
  pytcl/dynamic_models/continuous_time/__init__.py,sha256=dAkfEddLkfMvDalK9v2GRBvaZV1KgqYpFBLOnoiFClw,1023
  pytcl/dynamic_models/continuous_time/dynamics.py,sha256=CDwqn-66eUwXA5xfIjaG6A4EDBqtOyQ3aWarJr9QH4g,12858
@@ -141,8 +141,8 @@ pytcl/trackers/mht.py,sha256=7mwhMmja3ri2wnx7W1wueDGn2r3ArwAxJDPUJ7IZAkQ,20617
  pytcl/trackers/multi_target.py,sha256=7ZL8V25TO_rEMtQm2eYkScesDQHC9qXZVHLHyVbxy3M,10529
  pytcl/trackers/single_target.py,sha256=Yy3FwaNTArMWcaod-0HVeiioNV4xLWxNDn_7ZPVqQYs,6562
  pytcl/transponders/__init__.py,sha256=5fL4u3lKCYgPLo5uFeuZbtRZkJPABntuKYGUvVgMMEI,41
- nrl_tracker-0.21.1.dist-info/LICENSE,sha256=rB5G4WppIIUzMOYr2N6uyYlNJ00hRJqE5tie6BMvYuE,1612
- nrl_tracker-0.21.1.dist-info/METADATA,sha256=Do-PM_-vvQ08w8QWNyvIuRmsAEqzFXe8NSO5BXLTlHg,10005
- nrl_tracker-0.21.1.dist-info/WHEEL,sha256=pL8R0wFFS65tNSRnaOVrsw9EOkOqxLrlUPenUYnJKNo,91
- nrl_tracker-0.21.1.dist-info/top_level.txt,sha256=17megxcrTPBWwPZTh6jTkwTKxX7No-ZqRpyvElnnO-s,6
- nrl_tracker-0.21.1.dist-info/RECORD,,
+ nrl_tracker-0.21.3.dist-info/LICENSE,sha256=rB5G4WppIIUzMOYr2N6uyYlNJ00hRJqE5tie6BMvYuE,1612
+ nrl_tracker-0.21.3.dist-info/METADATA,sha256=qpc9zHOAvOjQNhV2ziu4DS4eJE5bSNtUHlwImSyTsbo,10005
+ nrl_tracker-0.21.3.dist-info/WHEEL,sha256=pL8R0wFFS65tNSRnaOVrsw9EOkOqxLrlUPenUYnJKNo,91
+ nrl_tracker-0.21.3.dist-info/top_level.txt,sha256=17megxcrTPBWwPZTh6jTkwTKxX7No-ZqRpyvElnnO-s,6
+ nrl_tracker-0.21.3.dist-info/RECORD,,
pytcl/__init__.py CHANGED
@@ -20,7 +20,7 @@ References
  no. 5, pp. 18-27, May 2017.
  """

- __version__ = "0.18.0"
+ __version__ = "0.21.3"
  __author__ = "Python Port Contributors"
  __original_author__ = "David F. Crouse, Naval Research Laboratory"

pytcl/assignment_algorithms/jpda.py CHANGED
@@ -12,6 +12,7 @@ as JPDA can handle measurement origin uncertainty in cluttered environments.
  from typing import List, NamedTuple, Optional, Tuple

  import numpy as np
+ from numba import njit
  from numpy.typing import ArrayLike, NDArray
  from scipy.stats import chi2

@@ -314,45 +315,34 @@ def _jpda_exact(
  return beta


- def _jpda_approximate(
- likelihood_matrix: NDArray,
- gated: NDArray,
+ @njit(cache=True)
+ def _jpda_approximate_core(
+ likelihood_matrix: np.ndarray,
+ gated: np.ndarray,
  detection_prob: float,
  clutter_density: float,
- ) -> NDArray:
- """
- Approximate JPDA using parametric approach.
-
- Uses the approach from [1] which is O(n_tracks * n_meas^2).
-
- References
- ----------
- .. [1] Fitzgerald, R.J., "Development of Practical PDA Logic for
- Multitarget Tracking by Microprocessor", American Control
- Conference, 1986.
- """
- n_tracks, n_meas = likelihood_matrix.shape
- beta = np.zeros((n_tracks, n_meas + 1))
-
- # For each track, compute association probabilities independently
- # then apply correction for shared measurements
+ ) -> np.ndarray:
+ """JIT-compiled core of approximate JPDA computation."""
+ n_tracks = likelihood_matrix.shape[0]
+ n_meas = likelihood_matrix.shape[1]
+ beta = np.zeros((n_tracks, n_meas + 1), dtype=np.float64)

  # Likelihood ratio for each measurement given each track
- # L[i,j] = likelihood(z_j | track i) / clutter_density
- L = np.zeros((n_tracks, n_meas))
+ L = np.zeros((n_tracks, n_meas), dtype=np.float64)
  for i in range(n_tracks):
  for j in range(n_meas):
  if gated[i, j] and clutter_density > 0:
  L[i, j] = likelihood_matrix[i, j] / clutter_density
  elif gated[i, j]:
- L[i, j] = likelihood_matrix[i, j] * 1e10 # Large value
+ L[i, j] = likelihood_matrix[i, j] * 1e10

  # Compute delta factors (accounts for other tracks)
- delta = np.ones((n_tracks, n_meas))
+ delta = np.ones((n_tracks, n_meas), dtype=np.float64)

  for j in range(n_meas):
- # Sum of likelihood ratios for measurement j
- sum_L = np.sum(L[:, j])
+ sum_L = 0.0
+ for i in range(n_tracks):
+ sum_L += L[i, j]
  for i in range(n_tracks):
  if sum_L > 0:
  delta[i, j] = 1.0 / (1.0 + sum_L - L[i, j])
@@ -361,7 +351,6 @@ def _jpda_approximate(

  # Compute association probabilities
  for i in range(n_tracks):
- # Denominator for normalization
  denom = 1.0 - detection_prob

  for j in range(n_meas):
@@ -369,9 +358,9 @@ def _jpda_approximate(
  beta[i, j] = detection_prob * L[i, j] * delta[i, j]
  denom += beta[i, j]

- # Normalize
  if denom > 0:
- beta[i, :n_meas] /= denom
+ for j in range(n_meas):
+ beta[i, j] /= denom
  beta[i, n_meas] = (1.0 - detection_prob) / denom
  else:
  beta[i, n_meas] = 1.0
@@ -379,6 +368,31 @@ def _jpda_approximate(
  return beta


+ def _jpda_approximate(
+ likelihood_matrix: NDArray,
+ gated: NDArray,
+ detection_prob: float,
+ clutter_density: float,
+ ) -> NDArray:
+ """
+ Approximate JPDA using parametric approach.
+
+ Uses the approach from [1] which is O(n_tracks * n_meas^2).
+
+ References
+ ----------
+ .. [1] Fitzgerald, R.J., "Development of Practical PDA Logic for
+ Multitarget Tracking by Microprocessor", American Control
+ Conference, 1986.
+ """
+ return _jpda_approximate_core(
+ likelihood_matrix.astype(np.float64),
+ gated.astype(np.bool_),
+ detection_prob,
+ clutter_density,
+ )
+
+
  def jpda_update(
  track_states: List[ArrayLike],
  track_covariances: List[ArrayLike],
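Throughout 0.21.3 the same refactoring pattern appears: a numba @njit core that takes concrete NumPy arrays, plus a thin public wrapper that coerces dtypes before dispatching. A minimal, self-contained sketch of that pattern (hypothetical names, assuming numba is available; not code from the package):

    import numpy as np
    from numba import njit

    @njit(cache=True)
    def _row_sums_core(a: np.ndarray) -> np.ndarray:
        # Explicit loops compile cleanly under numba's nopython mode.
        out = np.zeros(a.shape[0], dtype=np.float64)
        for i in range(a.shape[0]):
            for j in range(a.shape[1]):
                out[i] += a[i, j]
        return out

    def row_sums(a) -> np.ndarray:
        # Thin wrapper: fix the dtype so the JIT core sees a stable signature.
        return _row_sums_core(np.asarray(a, dtype=np.float64))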
pytcl/assignment_algorithms/two_dimensional/assignment.py CHANGED
@@ -211,14 +211,21 @@ def auction(
  # Compute values: value[j] = -cost[i,j] - prices[j]
  values = -cost[i, :] - prices

- # Find best and second best
- sorted_idx = np.argsort(values)[::-1]
- best_j = sorted_idx[0]
- best_value = values[best_j]
-
- if len(sorted_idx) > 1:
- second_value = values[sorted_idx[1]]
+ # Find best and second best using argpartition (O(n) vs O(n log n))
+ if len(values) >= 2:
+ # Get indices of top 2 values
+ top2_idx = np.argpartition(values, -2)[-2:]
+ # Determine which is best and second best
+ if values[top2_idx[0]] > values[top2_idx[1]]:
+ best_j = top2_idx[0]
+ second_value = values[top2_idx[1]]
+ else:
+ best_j = top2_idx[1]
+ second_value = values[top2_idx[0]]
+ best_value = values[best_j]
  else:
+ best_j = np.argmax(values)
+ best_value = values[best_j]
  second_value = -np.inf

  # Bid increment
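For reference, np.argpartition(values, -2)[-2:] returns the indices of the two largest entries in no particular order, which is why the new code still compares the pair explicitly. A standalone check of that behavior with made-up values (hedged sketch, not package code):

    import numpy as np

    values = np.array([3.0, 9.0, 1.0, 7.0])
    top2 = np.argpartition(values, -2)[-2:]   # indices of the two largest, unordered
    best = top2[np.argmax(values[top2])]      # resolve which of the pair is the maximum
    second = top2[np.argmin(values[top2])]
    assert best == 1 and values[second] == 7.0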
pytcl/clustering/dbscan.py CHANGED
@@ -15,6 +15,7 @@ References
  from typing import List, NamedTuple, Set

  import numpy as np
+ from numba import njit
  from numpy.typing import ArrayLike, NDArray


@@ -40,6 +41,23 @@ class DBSCANResult(NamedTuple):
  n_noise: int


+ @njit(cache=True)
+ def _compute_distance_matrix(X: np.ndarray) -> np.ndarray:
+ """Compute pairwise Euclidean distance matrix (JIT-compiled)."""
+ n = X.shape[0]
+ dist = np.zeros((n, n), dtype=np.float64)
+ for i in range(n):
+ for j in range(i + 1, n):
+ d = 0.0
+ for k in range(X.shape[1]):
+ diff = X[i, k] - X[j, k]
+ d += diff * diff
+ d = np.sqrt(d)
+ dist[i, j] = d
+ dist[j, i] = d
+ return dist
+
+
  def compute_neighbors(
  X: NDArray[np.floating],
  eps: float,
@@ -60,13 +78,13 @@ def compute_neighbors(
  neighbors[i] contains indices of points within eps of point i.
  """
  n_samples = X.shape[0]
- neighbors = []

+ # Use JIT-compiled distance matrix computation
+ dist_matrix = _compute_distance_matrix(X)
+
+ neighbors = []
  for i in range(n_samples):
- # Compute distances from point i to all points
- distances = np.sqrt(np.sum((X - X[i]) ** 2, axis=1))
- # Find points within eps (including self)
- neighbor_indices = np.where(distances <= eps)[0]
+ neighbor_indices = np.where(dist_matrix[i] <= eps)[0]
  neighbors.append(neighbor_indices)

  return neighbors
pytcl/clustering/hierarchical.py CHANGED
@@ -15,6 +15,7 @@ from enum import Enum
  from typing import List, Literal, NamedTuple, Optional

  import numpy as np
+ from numba import njit
  from numpy.typing import ArrayLike, NDArray


@@ -70,6 +71,26 @@ class HierarchicalResult(NamedTuple):
  dendrogram: List[DendrogramNode]


+ @njit(cache=True)
+ def _compute_distance_matrix_jit(X: np.ndarray) -> np.ndarray:
+ """JIT-compiled pairwise Euclidean distance computation."""
+ n = X.shape[0]
+ n_features = X.shape[1]
+ distances = np.zeros((n, n), dtype=np.float64)
+
+ for i in range(n):
+ for j in range(i + 1, n):
+ d = 0.0
+ for k in range(n_features):
+ diff = X[i, k] - X[j, k]
+ d += diff * diff
+ d = np.sqrt(d)
+ distances[i, j] = d
+ distances[j, i] = d
+
+ return distances
+
+
  def compute_distance_matrix(
  X: NDArray[np.floating],
  ) -> NDArray[np.floating]:
@@ -86,16 +107,8 @@ def compute_distance_matrix(
  distances : ndarray
  Distance matrix, shape (n_samples, n_samples).
  """
- n = X.shape[0]
- distances = np.zeros((n, n))
-
- for i in range(n):
- for j in range(i + 1, n):
- d = np.sqrt(np.sum((X[i] - X[j]) ** 2))
- distances[i, j] = d
- distances[j, i] = d
-
- return distances
+ X = np.asarray(X, dtype=np.float64)
+ return _compute_distance_matrix_jit(X)


  def _single_linkage(
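Both dbscan.py and hierarchical.py swap an O(n^2) NumPy-level loop for the same explicit triple-loop kernel under @njit. As a rough cross-check of the technique (a hedged sketch with hypothetical names, not package code), such a kernel should agree with a broadcasting-based reference:

    import numpy as np
    from numba import njit

    @njit(cache=True)
    def pairwise_dist(X):
        # Symmetric Euclidean distance matrix via explicit loops (numba-friendly).
        n, m = X.shape
        D = np.zeros((n, n))
        for i in range(n):
            for j in range(i + 1, n):
                s = 0.0
                for k in range(m):
                    d = X[i, k] - X[j, k]
                    s += d * d
                D[i, j] = np.sqrt(s)
                D[j, i] = D[i, j]
        return D

    X = np.random.default_rng(0).normal(size=(50, 3))
    ref = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1))
    assert np.allclose(pairwise_dist(X), ref)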
pytcl/clustering/kmeans.py CHANGED
@@ -14,6 +14,7 @@ from typing import Literal, NamedTuple, Optional, Union

  import numpy as np
  from numpy.typing import ArrayLike, NDArray
+ from scipy.spatial.distance import cdist


  class KMeansResult(NamedTuple):
@@ -91,11 +92,8 @@ def kmeans_plusplus_init(

  # Subsequent centers: sample proportional to D^2
  for k in range(1, n_clusters):
- # Compute squared distances to nearest center
- distances_sq = np.full(n_samples, np.inf)
- for j in range(k):
- d_sq = np.sum((X - centers[j]) ** 2, axis=1)
- distances_sq = np.minimum(distances_sq, d_sq)
+ # Compute squared distances to nearest center (vectorized via cdist)
+ distances_sq = cdist(X, centers[:k], metric="sqeuclidean").min(axis=1)

  # Sample proportional to D^2
  probs = distances_sq / distances_sq.sum()
@@ -138,12 +136,9 @@ def assign_clusters(
  centers = np.asarray(centers, dtype=np.float64)

  n_samples = X.shape[0]
- n_clusters = centers.shape[0]

- # Compute distances to all centers
- distances_sq = np.zeros((n_samples, n_clusters))
- for k in range(n_clusters):
- distances_sq[:, k] = np.sum((X - centers[k]) ** 2, axis=1)
+ # Compute squared distances to all centers (vectorized via cdist)
+ distances_sq = cdist(X, centers, metric="sqeuclidean")

  # Assign to nearest center
  labels = np.argmin(distances_sq, axis=1).astype(np.intp)
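scipy.spatial.distance.cdist(X, C, metric="sqeuclidean") produces the full (n_samples, n_centers) matrix of squared distances in one call, which is what both rewritten loops lean on. A small standalone sketch with made-up data (not package code):

    import numpy as np
    from scipy.spatial.distance import cdist

    rng = np.random.default_rng(1)
    X = rng.normal(size=(100, 2))   # sample points
    C = rng.normal(size=(3, 2))     # cluster centers

    d2 = cdist(X, C, metric="sqeuclidean")   # shape (100, 3)
    labels = d2.argmin(axis=1)               # nearest-center assignment
    nearest_d2 = d2.min(axis=1)              # the D^2 term sampled in k-means++
    assert d2.shape == (100, 3) and labels.shape == (100,)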
pytcl/dynamic_estimation/imm.py CHANGED
@@ -131,15 +131,16 @@ def compute_mixing_probabilities(
  # Predicted mode probabilities: c_bar[j] = sum_i Pi[i,j] * mu[i]
  c_bar = Pi.T @ mode_probs

- # Mixing probabilities: mu[i|j] = Pi[i,j] * mu[i] / c_bar[j]
- mixing_probs = np.zeros((r, r))
- for j in range(r):
- if c_bar[j] > 1e-15:
- for i in range(r):
- mixing_probs[i, j] = Pi[i, j] * mode_probs[i] / c_bar[j]
- else:
- # Uniform if predicted probability is zero
- mixing_probs[:, j] = 1.0 / r
+ # Mixing probabilities: mu[i|j] = Pi[i,j] * mu[i] / c_bar[j] (vectorized)
+ # Compute numerator: Pi[i,j] * mu[i] for all i,j
+ numerator = Pi * mode_probs[:, np.newaxis]
+ # Divide by c_bar (with safe division for near-zero values)
+ safe_c_bar = np.where(c_bar > 1e-15, c_bar, 1.0)
+ mixing_probs = numerator / safe_c_bar
+ # Set uniform for columns where c_bar was too small
+ zero_mask = c_bar <= 1e-15
+ if np.any(zero_mask):
+ mixing_probs[:, zero_mask] = 1.0 / r

  return mixing_probs, c_bar

@@ -169,23 +170,30 @@ def mix_states(
  Mixed covariances for each mode.
  """
  r = len(mode_states)
- n = len(mode_states[0])
+
+ # Stack states and covariances for vectorized operations
+ states_array = np.array(mode_states) # shape (r, n)
+ covs_array = np.array(mode_covs) # shape (r, n, n)

  mixed_states = []
  mixed_covs = []

  for j in range(r):
- # Mixed state: x_0j = sum_i mu[i|j] * x_i
- x_mixed = np.zeros(n)
- for i in range(r):
- x_mixed += mixing_probs[i, j] * mode_states[i]
- mixed_states.append(x_mixed)
+ # Mixed state: x_0j = sum_i mu[i|j] * x_i (vectorized)
+ x_mixed = mixing_probs[:, j] @ states_array

  # Mixed covariance: P_0j = sum_i mu[i|j] * (P_i + (x_i - x_0j)(x_i - x_0j)^T)
- P_mixed = np.zeros((n, n))
- for i in range(r):
- diff = mode_states[i] - x_mixed
- P_mixed += mixing_probs[i, j] * (mode_covs[i] + np.outer(diff, diff))
+ # Compute differences for all modes at once
+ diffs = states_array - x_mixed # shape (r, n)
+ # Weighted covariances + outer products (vectorized)
+ weights = mixing_probs[:, j]
+ # Weighted sum of covariances
+ P_mixed = np.tensordot(weights, covs_array, axes=([0], [0]))
+ # Add weighted outer products: sum_i w_i * outer(diff_i, diff_i)
+ weighted_diffs = np.sqrt(weights)[:, np.newaxis] * diffs
+ P_mixed += weighted_diffs.T @ weighted_diffs
+
+ mixed_states.append(x_mixed)
  mixed_covs.append(P_mixed)

  return mixed_states, mixed_covs
@@ -215,19 +223,20 @@ def combine_estimates(
  P : ndarray
  Combined covariance.
  """
- r = len(mode_states)
- n = len(mode_states[0])
-
- # Combined state: x = sum_j mu_j * x_j
- x = np.zeros(n)
- for j in range(r):
- x += mode_probs[j] * mode_states[j]
-
- # Combined covariance: P = sum_j mu_j * (P_j + (x_j - x)(x_j - x)^T)
- P = np.zeros((n, n))
- for j in range(r):
- diff = mode_states[j] - x
- P += mode_probs[j] * (mode_covs[j] + np.outer(diff, diff))
+ # Stack states and covariances for vectorized operations
+ states_array = np.array(mode_states) # shape (r, n)
+ covs_array = np.array(mode_covs) # shape (r, n, n)
+
+ # Combined state: x = sum_j mu_j * x_j (vectorized)
+ x = mode_probs @ states_array
+
+ # Combined covariance: P = sum_j mu_j * (P_j + (x_j - x)(x_j - x)^T) (vectorized)
+ diffs = states_array - x # shape (r, n)
+ # Weighted sum of covariances
+ P = np.tensordot(mode_probs, covs_array, axes=([0], [0]))
+ # Add weighted outer products
+ weighted_diffs = np.sqrt(mode_probs)[:, np.newaxis] * diffs
+ P += weighted_diffs.T @ weighted_diffs

  # Ensure symmetry
  P = (P + P.T) / 2
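Both mix_states and combine_estimates now rely on the identity sum_j w_j (P_j + d_j d_j^T) = tensordot(w, P) + (sqrt(w) * d)^T (sqrt(w) * d), which holds because mode probabilities are non-negative. A quick numerical check of that identity with synthetic values (hedged sketch, not package code):

    import numpy as np

    rng = np.random.default_rng(2)
    r, n = 3, 4
    w = rng.dirichlet(np.ones(r))              # non-negative weights summing to 1
    states = rng.normal(size=(r, n))
    covs = np.stack([np.eye(n) * (j + 1) for j in range(r)])

    x = w @ states
    diffs = states - x

    # Loop form (pre-0.21.3 style)
    P_loop = sum(w[j] * (covs[j] + np.outer(diffs[j], diffs[j])) for j in range(r))

    # Vectorized form used in the new imm.py
    wd = np.sqrt(w)[:, None] * diffs
    P_vec = np.tensordot(w, covs, axes=([0], [0])) + wd.T @ wd

    assert np.allclose(P_loop, P_vec)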
pytcl/dynamic_estimation/kalman/linear.py CHANGED
@@ -9,6 +9,7 @@ from typing import NamedTuple, Optional, Tuple

  import numpy as np
  from numpy.typing import ArrayLike, NDArray
+ from scipy.linalg import cho_factor, cho_solve


  class KalmanState(NamedTuple):
@@ -204,9 +205,22 @@ def kf_update(
  # Innovation covariance
  S = H @ P @ H.T + R

- # Kalman gain using solve for numerical stability
- # K = P @ H' @ S^{-1}
- K = np.linalg.solve(S.T, H @ P.T).T
+ # Use Cholesky decomposition for efficient solving (reused for gain and likelihood)
+ # This is more numerically stable and efficient than repeated solve() calls
+ try:
+ S_cho = cho_factor(S)
+ # Kalman gain: K = P @ H' @ S^{-1}
+ K = cho_solve(S_cho, H @ P.T).T
+ # Mahalanobis distance for likelihood
+ mahal_sq = y @ cho_solve(S_cho, y)
+ # Log determinant from Cholesky factor (more stable than det)
+ log_det_S = 2 * np.sum(np.log(np.diag(S_cho[0])))
+ m = len(z)
+ likelihood = np.exp(-0.5 * (mahal_sq + log_det_S + m * np.log(2 * np.pi)))
+ except np.linalg.LinAlgError:
+ # Fallback if Cholesky fails (S not positive definite)
+ K = np.linalg.solve(S.T, H @ P.T).T
+ likelihood = 0.0

  # Updated state
  x_upd = x + K @ y
@@ -218,16 +232,6 @@ def kf_update(
  # Ensure symmetry
  P_upd = (P_upd + P_upd.T) / 2

- # Compute likelihood for data association
- # p(z|x) = N(z; H@x, S)
- m = len(z)
- det_S = np.linalg.det(S)
- if det_S > 0:
- mahal_sq = y @ np.linalg.solve(S, y)
- likelihood = np.exp(-0.5 * mahal_sq) / np.sqrt((2 * np.pi) ** m * det_S)
- else:
- likelihood = 0.0
-
  return KalmanUpdate(
  x=x_upd,
  P=P_upd,
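The rewritten kf_update derives the Gaussian innovation likelihood from a single Cholesky factorization of S: the Mahalanobis term via cho_solve and log det S as twice the sum of the log-diagonal of the factor. A standalone check against scipy's multivariate normal, with made-up numbers (hedged sketch, not package code):

    import numpy as np
    from scipy.linalg import cho_factor, cho_solve
    from scipy.stats import multivariate_normal

    y = np.array([0.3, -0.1])                  # innovation z - H @ x
    S = np.array([[2.0, 0.4], [0.4, 1.5]])     # innovation covariance

    S_cho = cho_factor(S)
    mahal_sq = y @ cho_solve(S_cho, y)
    log_det_S = 2.0 * np.sum(np.log(np.diag(S_cho[0])))
    m = y.size
    likelihood = np.exp(-0.5 * (mahal_sq + log_det_S + m * np.log(2.0 * np.pi)))

    assert np.isclose(likelihood, multivariate_normal(mean=np.zeros(m), cov=S).pdf(y))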
pytcl/dynamic_estimation/kalman/unscented.py CHANGED
@@ -203,11 +203,19 @@
  # Weighted mean
  mean = np.sum(Wm[:, np.newaxis] * sigmas, axis=0)

- # Weighted covariance
+ # Weighted covariance (vectorized: avoids loop over sigma points)
  residuals = sigmas - mean
- cov = np.zeros((sigmas.shape[1], sigmas.shape[1]), dtype=np.float64)
- for i in range(len(sigmas)):
- cov += Wc[i] * np.outer(residuals[i], residuals[i])
+ # Compute weighted outer products in one operation: (W * residuals)^T @ residuals
+ weighted_residuals = np.sqrt(np.abs(Wc))[:, np.newaxis] * residuals
+ # Handle negative weights (e.g., from Merwe scaling) by adjusting sign
+ cov = weighted_residuals.T @ weighted_residuals
+ # Correct for any negative weights (subtract their contribution twice to flip sign)
+ neg_mask = Wc < 0
+ if np.any(neg_mask):
+ neg_residuals = residuals[neg_mask]
+ neg_weights = -Wc[neg_mask]
+ for i, (w, r) in enumerate(zip(neg_weights, neg_residuals)):
+ cov -= 2 * w * np.outer(r, r)

  if noise_cov is not None:
  cov += np.asarray(noise_cov, dtype=np.float64)
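The hunk above handles negative Wc entries by building the Gram product from sqrt(|Wc|) and then flipping their sign in a small correction loop. An equivalent sign-safe one-liner is (Wc[:, None] * residuals).T @ residuals, which is the form the cross-covariance hunk below already uses; a quick equivalence check with synthetic data (hedged sketch, not package code):

    import numpy as np

    rng = np.random.default_rng(3)
    Wc = rng.normal(size=7)                 # weights, possibly negative (Merwe scaling)
    residuals = rng.normal(size=(7, 3))     # sigma-point residuals

    # Loop form (pre-0.21.3 style)
    cov_loop = sum(Wc[i] * np.outer(residuals[i], residuals[i]) for i in range(len(Wc)))

    # Sign-safe vectorized form
    cov_vec = (Wc[:, None] * residuals).T @ residuals

    assert np.allclose(cov_loop, cov_vec)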
@@ -329,7 +337,6 @@ def ukf_update(
  z = np.asarray(z, dtype=np.float64).flatten()
  R = np.asarray(R, dtype=np.float64)

- n = len(x)
  m = len(z)

  # Generate sigma points
@@ -341,10 +348,11 @@ def ukf_update(
  # Predicted measurement mean and covariance
  z_pred, S = unscented_transform(sigmas_h, sp.Wm, sp.Wc, R)

- # Cross-covariance between state and measurement
- Pxz = np.zeros((n, m), dtype=np.float64)
- for i in range(len(sp.points)):
- Pxz += sp.Wc[i] * np.outer(sp.points[i] - x, sigmas_h[i] - z_pred)
+ # Cross-covariance between state and measurement (vectorized)
+ x_residuals = sp.points - x
+ z_residuals = sigmas_h - z_pred
+ # Weighted cross-covariance: sum of Wc[i] * outer(x_res[i], z_res[i])
+ Pxz = (sp.Wc[:, np.newaxis] * x_residuals).T @ z_residuals

  # Kalman gain
  K = np.linalg.solve(S.T, Pxz.T).T
@@ -467,12 +475,11 @@ def ckf_predict(
  # Predicted mean
  x_pred = np.sum(weights[:, np.newaxis] * transformed, axis=0)

- # Predicted covariance
+ # Predicted covariance (vectorized)
  residuals = transformed - x_pred
- P_pred = np.zeros((n, n), dtype=np.float64)
- for i in range(len(cubature_pts)):
- P_pred += weights[i] * np.outer(residuals[i], residuals[i])
- P_pred += Q
+ # All CKF weights are equal and positive, so vectorization is straightforward
+ weighted_residuals = np.sqrt(weights)[:, np.newaxis] * residuals
+ P_pred = weighted_residuals.T @ weighted_residuals + Q

  P_pred = (P_pred + P_pred.T) / 2

@@ -533,18 +540,14 @@ def ckf_update(
  # Predicted measurement
  z_pred = np.sum(weights[:, np.newaxis] * transformed, axis=0)

- # Innovation covariance
+ # Innovation covariance (vectorized)
  z_residuals = transformed - z_pred
- S = np.zeros((m, m), dtype=np.float64)
- for i in range(len(cubature_pts)):
- S += weights[i] * np.outer(z_residuals[i], z_residuals[i])
- S += R
+ weighted_z_residuals = np.sqrt(weights)[:, np.newaxis] * z_residuals
+ S = weighted_z_residuals.T @ weighted_z_residuals + R

- # Cross-covariance
+ # Cross-covariance (vectorized)
  x_residuals = cubature_pts - x
- Pxz = np.zeros((n, m), dtype=np.float64)
- for i in range(len(cubature_pts)):
- Pxz += weights[i] * np.outer(x_residuals[i], z_residuals[i])
+ Pxz = (weights[:, np.newaxis] * x_residuals).T @ z_residuals

  # Kalman gain
  K = np.linalg.solve(S.T, Pxz.T).T
pytcl/dynamic_estimation/particle_filters/bootstrap.py CHANGED
@@ -8,6 +8,7 @@ state estimation.
  from typing import Callable, NamedTuple, Optional, Tuple

  import numpy as np
+ from numba import njit
  from numpy.typing import ArrayLike, NDArray


@@ -99,6 +100,27 @@ def resample_systematic(
  return particles[indices].copy()


+ @njit(cache=True)
+ def _resample_residual_deterministic(
+ particles: np.ndarray,
+ floor_Nw: np.ndarray,
+ ) -> Tuple[np.ndarray, int]:
+ """JIT-compiled deterministic copy portion of residual resampling."""
+ N = particles.shape[0]
+ n = particles.shape[1]
+ resampled = np.zeros((N, n), dtype=np.float64)
+
+ idx = 0
+ for i in range(N):
+ count = floor_Nw[i]
+ for _ in range(count):
+ for k in range(n):
+ resampled[idx, k] = particles[i, k]
+ idx += 1
+
+ return resampled, idx
+
+
  def resample_residual(
  particles: NDArray[np.floating],
  weights: NDArray[np.floating],
@@ -128,24 +150,16 @@ def resample_residual(
  rng = np.random.default_rng()

  N = len(weights)
- n = particles.shape[1]

  # Integer and fractional parts
  Nw = N * weights
- floor_Nw = np.floor(Nw).astype(int)
+ floor_Nw = np.floor(Nw).astype(np.int64)
  residual = Nw - floor_Nw

- # Number of deterministic copies (used implicitly via floor_Nw loop)
-
- # Allocate output
- resampled = np.zeros((N, n), dtype=np.float64)
-
- # Deterministic copies
- idx = 0
- for i in range(N):
- for _ in range(floor_Nw[i]):
- resampled[idx] = particles[i]
- idx += 1
+ # Deterministic copies (JIT-compiled)
+ resampled, idx = _resample_residual_deterministic(
+ particles.astype(np.float64), floor_Nw
+ )

  # Multinomial resampling of residuals
  if idx < N:
@@ -410,6 +424,31 @@ def particle_mean(
  return np.sum(weights[:, np.newaxis] * particles, axis=0)


+ @njit(cache=True)
+ def _particle_covariance_core(
+ particles: np.ndarray,
+ weights: np.ndarray,
+ mean: np.ndarray,
+ ) -> np.ndarray:
+ """JIT-compiled core for particle covariance computation."""
+ N = particles.shape[0]
+ n = particles.shape[1]
+ cov = np.zeros((n, n), dtype=np.float64)
+
+ for i in range(N):
+ w = weights[i]
+ for j in range(n):
+ diff_j = particles[i, j] - mean[j]
+ for k in range(j, n):
+ diff_k = particles[i, k] - mean[k]
+ val = w * diff_j * diff_k
+ cov[j, k] += val
+ if j != k:
+ cov[k, j] += val
+
+ return cov
+
+
  def particle_covariance(
  particles: NDArray[np.floating],
  weights: NDArray[np.floating],
@@ -435,12 +474,11 @@ def particle_covariance(
  if mean is None:
  mean = particle_mean(particles, weights)

- residuals = particles - mean
- cov = np.zeros((particles.shape[1], particles.shape[1]), dtype=np.float64)
- for i in range(len(particles)):
- cov += weights[i] * np.outer(residuals[i], residuals[i])
-
- return cov
+ return _particle_covariance_core(
+ particles.astype(np.float64),
+ weights.astype(np.float64),
+ mean.astype(np.float64),
+ )


  def initialize_particles(
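particle_covariance now delegates to the symmetric @njit core above. The same weighted covariance, sum_i w_i (p_i - mean)(p_i - mean)^T, can be cross-checked against an einsum reference with synthetic particles (hedged sketch, not package code):

    import numpy as np

    rng = np.random.default_rng(4)
    N, n = 200, 4
    particles = rng.normal(size=(N, n))
    weights = rng.random(N)
    weights /= weights.sum()

    mean = weights @ particles
    residuals = particles - mean

    # Reference: einsum over weighted outer products
    cov_ref = np.einsum("i,ij,ik->jk", weights, residuals, residuals)

    # Loop form equivalent to the JIT core in bootstrap.py
    cov_loop = np.zeros((n, n))
    for i in range(N):
        cov_loop += weights[i] * np.outer(residuals[i], residuals[i])

    assert np.allclose(cov_ref, cov_loop)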