nrl-tracker 1.6.0-py3-none-any.whl → 1.7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/METADATA +14 -10
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/RECORD +75 -68
- pytcl/__init__.py +2 -2
- pytcl/assignment_algorithms/__init__.py +28 -0
- pytcl/assignment_algorithms/gating.py +10 -10
- pytcl/assignment_algorithms/jpda.py +40 -40
- pytcl/assignment_algorithms/nd_assignment.py +379 -0
- pytcl/assignment_algorithms/network_flow.py +371 -0
- pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
- pytcl/astronomical/__init__.py +35 -0
- pytcl/astronomical/ephemerides.py +14 -11
- pytcl/astronomical/reference_frames.py +110 -4
- pytcl/astronomical/relativity.py +6 -5
- pytcl/astronomical/special_orbits.py +532 -0
- pytcl/atmosphere/__init__.py +11 -0
- pytcl/atmosphere/nrlmsise00.py +809 -0
- pytcl/clustering/dbscan.py +2 -2
- pytcl/clustering/gaussian_mixture.py +3 -3
- pytcl/clustering/hierarchical.py +15 -15
- pytcl/clustering/kmeans.py +4 -4
- pytcl/containers/base.py +3 -3
- pytcl/containers/cluster_set.py +12 -2
- pytcl/containers/covertree.py +5 -3
- pytcl/containers/rtree.py +1 -1
- pytcl/containers/vptree.py +4 -2
- pytcl/coordinate_systems/conversions/geodetic.py +272 -5
- pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
- pytcl/coordinate_systems/projections/projections.py +2 -2
- pytcl/coordinate_systems/rotations/rotations.py +10 -6
- pytcl/core/validation.py +3 -3
- pytcl/dynamic_estimation/__init__.py +26 -0
- pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
- pytcl/dynamic_estimation/imm.py +14 -14
- pytcl/dynamic_estimation/kalman/__init__.py +12 -0
- pytcl/dynamic_estimation/kalman/constrained.py +382 -0
- pytcl/dynamic_estimation/kalman/extended.py +8 -8
- pytcl/dynamic_estimation/kalman/h_infinity.py +2 -2
- pytcl/dynamic_estimation/kalman/square_root.py +8 -2
- pytcl/dynamic_estimation/kalman/sr_ukf.py +3 -3
- pytcl/dynamic_estimation/kalman/ud_filter.py +11 -5
- pytcl/dynamic_estimation/kalman/unscented.py +8 -6
- pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
- pytcl/dynamic_estimation/rbpf.py +589 -0
- pytcl/gravity/spherical_harmonics.py +3 -3
- pytcl/gravity/tides.py +6 -6
- pytcl/logging_config.py +3 -3
- pytcl/magnetism/emm.py +10 -3
- pytcl/magnetism/wmm.py +4 -4
- pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
- pytcl/mathematical_functions/geometry/geometry.py +5 -5
- pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
- pytcl/mathematical_functions/signal_processing/detection.py +24 -24
- pytcl/mathematical_functions/signal_processing/filters.py +14 -14
- pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
- pytcl/mathematical_functions/special_functions/bessel.py +15 -3
- pytcl/mathematical_functions/special_functions/debye.py +5 -1
- pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
- pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
- pytcl/mathematical_functions/special_functions/hypergeometric.py +6 -4
- pytcl/mathematical_functions/transforms/fourier.py +8 -8
- pytcl/mathematical_functions/transforms/stft.py +12 -12
- pytcl/mathematical_functions/transforms/wavelets.py +9 -9
- pytcl/navigation/geodesy.py +3 -3
- pytcl/navigation/great_circle.py +5 -5
- pytcl/plotting/coordinates.py +7 -7
- pytcl/plotting/tracks.py +2 -2
- pytcl/static_estimation/maximum_likelihood.py +16 -14
- pytcl/static_estimation/robust.py +5 -5
- pytcl/terrain/loaders.py +5 -5
- pytcl/trackers/hypothesis.py +1 -1
- pytcl/trackers/mht.py +9 -9
- pytcl/trackers/multi_target.py +1 -1
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,379 @@ pytcl/assignment_algorithms/nd_assignment.py (new file)

"""
N-dimensional assignment algorithms (4D and higher).

This module extends the 3D assignment solver to arbitrary dimensions,
enabling more complex assignment scenarios such as:
- 4D: Measurements × Tracks × Hypotheses × Sensors
- 5D+: Additional dimensions for time frames, maneuver classes, etc.

The module provides a unified interface for solving high-dimensional
assignment problems using generalized relaxation methods.

References
----------
.. [1] Poore, A. B., "Multidimensional Assignment Problem and Data
       Association," IEEE Transactions on Aerospace and Electronic Systems,
       2013.
.. [2] Cramer, R. D., et al., "The Emerging Role of Chemical Similarity in
       Drug Discovery," Perspectives in Drug Discovery and Design, 2003.
"""

from typing import NamedTuple, Optional, Tuple

import numpy as np
from numpy.typing import NDArray


class AssignmentNDResult(NamedTuple):
    """Result of an N-dimensional assignment problem.

    Attributes
    ----------
    assignments : ndarray
        Array of shape (n_assignments, n_dimensions) containing assigned
        index tuples. Each row is an n-tuple of indices.
    cost : float
        Total assignment cost.
    converged : bool
        Whether the algorithm converged (for iterative methods).
    n_iterations : int
        Number of iterations used (for iterative methods).
    gap : float
        Optimality gap (upper_bound - lower_bound) for relaxation methods.
    """

    assignments: NDArray[np.intp]
    cost: float
    converged: bool
    n_iterations: int
    gap: float

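For orientation, the 4-D scenario named in the module docstring (measurements × tracks × hypotheses × sensors) corresponds to an ordinary NumPy array of association costs, and every solver below returns an `AssignmentNDResult` with the fields listed above. The sizes, seed, and random values in this sketch are illustrative only and do not come from the package:

```python
import numpy as np

rng = np.random.default_rng(0)

# Hypothetical problem sizes: 5 measurements, 4 tracks, 3 hypotheses, 2 sensors.
n_meas, n_tracks, n_hyps, n_sensors = 5, 4, 3, 2

# cost[i, j, k, l]: cost of associating measurement i with track j under
# hypothesis k as reported by sensor l (random values, purely illustrative).
cost = rng.random((n_meas, n_tracks, n_hyps, n_sensors))

print(cost.shape)  # (5, 4, 3, 2) -- a valid input for the solvers in this module
```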
def validate_cost_tensor(cost_tensor: NDArray[np.float64]) -> Tuple[int, ...]:
    """
    Validate cost tensor and return dimensions.

    Parameters
    ----------
    cost_tensor : ndarray
        Cost tensor of arbitrary dimension.

    Returns
    -------
    dims : tuple
        Dimensions of the cost tensor.

    Raises
    ------
    ValueError
        If tensor has fewer than 2 dimensions.
    """
    if cost_tensor.ndim < 2:
        raise ValueError(
            f"Cost tensor must have at least 2 dimensions, got {cost_tensor.ndim}"
        )

    return cost_tensor.shape

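A minimal usage sketch for `validate_cost_tensor`, assuming the functions are importable from `pytcl.assignment_algorithms.nd_assignment` as the file layout above suggests; the input arrays are made up for the example:

```python
import numpy as np

from pytcl.assignment_algorithms.nd_assignment import validate_cost_tensor

cost = np.zeros((5, 4, 3, 2))
print(validate_cost_tensor(cost))  # (5, 4, 3, 2)

try:
    validate_cost_tensor(np.zeros(5))  # 1-D input is rejected
except ValueError as exc:
    print(exc)  # "Cost tensor must have at least 2 dimensions, got 1"
```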
def greedy_assignment_nd(
    cost_tensor: NDArray[np.float64],
    max_assignments: Optional[int] = None,
) -> AssignmentNDResult:
    """
    Greedy solver for N-dimensional assignment.

    Selects minimum-cost tuples in order until no more valid assignments
    exist (no dimension index is repeated).

    Parameters
    ----------
    cost_tensor : ndarray
        Cost tensor of shape (n1, n2, ..., nk).
    max_assignments : int, optional
        Maximum number of assignments to find (default: min(dimensions)).

    Returns
    -------
    AssignmentNDResult
        Assignments, total cost, and algorithm info.

    Notes
    -----
    Greedy assignment is fast O(n log n) but not optimal. Used as
    heuristic or starting solution for optimization methods.
    """
    dims = cost_tensor.shape
    n_dims = len(dims)

    if max_assignments is None:
        max_assignments = min(dims)

    # Flatten tensor with index mapping
    flat_costs = cost_tensor.ravel()
    sorted_indices = np.argsort(flat_costs)

    assignments: list[tuple[int, ...]] = []
    used_indices: list[set[int]] = [set() for _ in range(n_dims)]

    for flat_idx in sorted_indices:
        if len(assignments) >= max_assignments:
            break

        # Convert flat index to multi-dimensional index
        multi_idx = np.unravel_index(flat_idx, dims)

        # Check if any dimension index is already used
        conflict = False
        for d, idx in enumerate(multi_idx):
            if idx in used_indices[d]:
                conflict = True
                break

        if not conflict:
            assignments.append(multi_idx)
            for d, idx in enumerate(multi_idx):
                used_indices[d].add(idx)

    assignments_array = np.array(assignments, dtype=np.intp)
    if assignments_array.size > 0:
        total_cost = float(np.sum(cost_tensor[tuple(assignments_array.T)]))
    else:
        total_cost = 0.0

    return AssignmentNDResult(
        assignments=assignments_array,
        cost=total_cost,
        converged=True,
        n_iterations=1,
        gap=0.0,  # Greedy doesn't compute lower bound
    )

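A small sketch of `greedy_assignment_nd` on a random 3-D tensor, under the same import-path assumption (tensor size and seed are arbitrary). It shows that the greedy result uses each index at most once per dimension and always reports `converged=True`, `n_iterations=1`, and `gap=0.0`:

```python
import numpy as np

from pytcl.assignment_algorithms.nd_assignment import greedy_assignment_nd

rng = np.random.default_rng(1)
cost = rng.random((4, 4, 4))  # small 3-D toy problem

result = greedy_assignment_nd(cost)
print(result.assignments)  # up to min(dims) = 4 rows of (i, j, k) index tuples
print(result.cost)         # sum of the selected tensor entries
print(result.converged, result.n_iterations, result.gap)  # True 1 0.0

# No index is reused within any dimension:
for d in range(cost.ndim):
    assert len(np.unique(result.assignments[:, d])) == len(result.assignments)
```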
def relaxation_assignment_nd(
    cost_tensor: NDArray[np.float64],
    max_iterations: int = 100,
    tolerance: float = 1e-6,
    verbose: bool = False,
) -> AssignmentNDResult:
    """
    Lagrangian relaxation solver for N-dimensional assignment.

    Uses iterative subgradient optimization on Lagrange multipliers
    to tighten the lower bound and find good solutions.

    Parameters
    ----------
    cost_tensor : ndarray
        Cost tensor of shape (n1, n2, ..., nk).
    max_iterations : int, optional
        Maximum iterations (default 100).
    tolerance : float, optional
        Convergence tolerance for gap (default 1e-6).
    verbose : bool, optional
        Print iteration info (default False).

    Returns
    -------
    AssignmentNDResult
        Assignments, total cost, convergence info, and optimality gap.

    Notes
    -----
    The relaxation approach:
    1. Maintain Lagrange multipliers for each dimension
    2. Solve relaxed problem (select best entries per tuple)
    3. Update multipliers based on constraint violations
    4. Iterate until convergence or gap tolerance met

    This guarantees a lower bound on optimal cost and often finds
    near-optimal or optimal solutions.
    """
    dims = cost_tensor.shape
    n_dims = len(dims)

    # Initialize Lagrange multipliers (one per dimension per index)
    lambdas = [np.zeros(dim) for dim in dims]

    best_cost = np.inf
    best_assignments = None
    lower_bound = -np.inf

    for iteration in range(max_iterations):
        # Compute relaxed costs: original - Lagrange penalty
        relaxed_cost = cost_tensor.copy()
        for d in range(n_dims):
            # Reshape lambda[d] to broadcast correctly
            shape = [1] * n_dims
            shape[d] = dims[d]
            relaxed_cost = relaxed_cost - lambdas[d].reshape(shape)

        # Solve relaxed problem: greedy on relaxed costs
        result_relaxed = greedy_assignment_nd(relaxed_cost)

        # Compute lower bound from relaxed solution
        lower_bound = result_relaxed.cost + sum(
            np.sum(lambdas[d]) for d in range(n_dims)
        )

        # Extract solution from relaxed problem
        if len(result_relaxed.assignments) > 0:
            actual_cost = float(
                np.sum(cost_tensor[tuple(result_relaxed.assignments.T)])
            )

            if actual_cost < best_cost:
                best_cost = actual_cost
                best_assignments = result_relaxed.assignments

        # Compute constraint violations and update multipliers
        violations = [np.zeros(dim) for dim in dims]

        for assignment in result_relaxed.assignments:
            for d, idx in enumerate(assignment):
                violations[d][idx] += 1

        # Subgradient descent on multipliers
        step_size = 1.0 / (iteration + 1)
        for d in range(n_dims):
            lambdas[d] -= step_size * (violations[d] - 1.0)

        # Compute gap
        gap = best_cost - lower_bound if best_cost != np.inf else np.inf

        if verbose:
            print(
                f"Iter {iteration+1}: LB={lower_bound:.4f}, UB={best_cost:.4f}, "
                f"Gap={gap:.6f}"
            )

        if gap < tolerance:
            if verbose:
                print(f"Converged at iteration {iteration+1}")
            break

    if best_assignments is None:
        best_assignments = np.empty((0, n_dims), dtype=np.intp)
        best_cost = 0.0

    gap = best_cost - lower_bound if best_cost != np.inf else np.inf

    return AssignmentNDResult(
        assignments=best_assignments,
        cost=best_cost,
        converged=gap < tolerance,
        n_iterations=iteration + 1,
        gap=gap,
    )

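A sketch comparing the greedy and Lagrangian-relaxation solvers on a random 4-D tensor, again assuming the `pytcl.assignment_algorithms.nd_assignment` import path; the tensor size, seed, iteration budget, and tolerance are arbitrary. Because the first relaxation iterate uses zero multipliers, its relaxed subproblem coincides with plain greedy, so the relaxation's best cost is never worse than the greedy cost, and `gap` is that upper bound minus the Lagrangian lower bound:

```python
import numpy as np

from pytcl.assignment_algorithms.nd_assignment import (
    greedy_assignment_nd,
    relaxation_assignment_nd,
)

rng = np.random.default_rng(2)
cost = rng.random((5, 5, 5, 5))  # random 4-D toy problem

greedy = greedy_assignment_nd(cost)
relaxed = relaxation_assignment_nd(cost, max_iterations=200, tolerance=1e-3)

print(greedy.cost)   # greedy upper bound
print(relaxed.cost)  # never worse than greedy.cost (iteration 0 reduces to greedy)
print(relaxed.converged, relaxed.n_iterations, relaxed.gap)
```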
def auction_assignment_nd(
    cost_tensor: NDArray[np.float64],
    max_iterations: int = 100,
    epsilon: float = 0.01,
    verbose: bool = False,
) -> AssignmentNDResult:
    """
    Auction algorithm for N-dimensional assignment.

    Inspired by the classical auction algorithm for 2D assignment,
    adapted to higher dimensions. Objects bid for assignments based
    on relative costs.

    Parameters
    ----------
    cost_tensor : ndarray
        Cost tensor of shape (n1, n2, ..., nk).
    max_iterations : int, optional
        Maximum iterations (default 100).
    epsilon : float, optional
        Bid increment (default 0.01). Larger epsilon → fewer iterations,
        worse solution; smaller epsilon → more iterations, better solution.
    verbose : bool, optional
        Print iteration info (default False).

    Returns
    -------
    AssignmentNDResult
        Assignments, total cost, convergence info, gap estimate.

    Notes
    -----
    The algorithm maintains a "price" for each index and allows bidding
    (price adjustment) to maximize value. Converges to epsilon-optimal
    solution in finite iterations.
    """
    dims = cost_tensor.shape
    n_dims = len(dims)

    # Initialize prices (one per dimension per index)
    prices = [np.zeros(dim) for dim in dims]

    for iteration in range(max_iterations):
        # Compute profit: cost - price penalty
        profit = cost_tensor.copy()
        for d in range(n_dims):
            shape = [1] * n_dims
            shape[d] = dims[d]
            profit = profit - prices[d].reshape(shape)

        # Find best assignment at current prices (greedy)
        result = greedy_assignment_nd(profit)

        if len(result.assignments) == 0:
            break

        # Update prices: increase price for "in-demand" indices
        demands = [np.zeros(dim) for dim in dims]
        for assignment in result.assignments:
            for d, idx in enumerate(assignment):
                demands[d][idx] += 1

        for d in range(n_dims):
            prices[d] += epsilon * (demands[d] - 1.0)

        if verbose and (iteration + 1) % 10 == 0:
            actual_cost = float(np.sum(cost_tensor[tuple(result.assignments.T)]))
            print(f"Iter {iteration+1}: Cost={actual_cost:.4f}")

    # Final solution
    result = greedy_assignment_nd(cost_tensor)

    return AssignmentNDResult(
        assignments=result.assignments,
        cost=result.cost,
        converged=True,
        n_iterations=iteration + 1,
        gap=0.0,  # Auction algorithm doesn't track gap formally
    )

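A corresponding sketch for `auction_assignment_nd`; `epsilon`, the iteration budget, and the tensor size are illustrative choices, and `verbose=True` prints the bid progress every ten iterations as implemented above:

```python
import numpy as np

from pytcl.assignment_algorithms.nd_assignment import auction_assignment_nd

rng = np.random.default_rng(3)
cost = rng.random((6, 6, 6))  # random 3-D toy problem

# Larger epsilon -> coarser, faster bidding; smaller epsilon -> finer, slower.
result = auction_assignment_nd(cost, max_iterations=50, epsilon=0.05, verbose=True)

print(result.assignments.shape)  # (6, 3): one (i, j, k) index tuple per assignment
print(result.cost, result.n_iterations)
```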
def detect_dimension_conflicts(
    assignments: NDArray[np.intp],
    dims: Tuple[int, ...],
) -> bool:
    """
    Check if assignments violate dimension uniqueness.

    For valid assignment, each index should appear at most once per dimension.

    Parameters
    ----------
    assignments : ndarray
        Array of shape (n_assignments, n_dimensions) with assignments.
    dims : tuple
        Dimensions of the cost tensor.

    Returns
    -------
    has_conflicts : bool
        True if any index appears more than once in any dimension.
    """
    n_dims = len(dims)

    for d in range(n_dims):
        indices_in_dim = assignments[:, d]
        if len(indices_in_dim) != len(np.unique(indices_in_dim)):
            return True

    return False
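Finally, a sketch of `detect_dimension_conflicts` as a post-hoc validity check on solver output, under the same import-path assumption; the conflicting index array below is hand-built for the example:

```python
import numpy as np

from pytcl.assignment_algorithms.nd_assignment import (
    detect_dimension_conflicts,
    greedy_assignment_nd,
)

cost = np.arange(24, dtype=float).reshape(2, 3, 4)
result = greedy_assignment_nd(cost)

# A greedy solution never reuses an index within a dimension.
print(detect_dimension_conflicts(result.assignments, cost.shape))  # False

# A deliberately conflicting set of tuples: index 0 is reused in dimension 0.
bad = np.array([[0, 0, 0], [0, 1, 1]], dtype=np.intp)
print(detect_dimension_conflicts(bad, cost.shape))  # True
```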