nrl-tracker 0.22.5__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/METADATA +57 -10
  2. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/RECORD +86 -69
  3. pytcl/__init__.py +4 -3
  4. pytcl/assignment_algorithms/__init__.py +28 -0
  5. pytcl/assignment_algorithms/dijkstra_min_cost.py +184 -0
  6. pytcl/assignment_algorithms/gating.py +10 -10
  7. pytcl/assignment_algorithms/jpda.py +40 -40
  8. pytcl/assignment_algorithms/nd_assignment.py +379 -0
  9. pytcl/assignment_algorithms/network_flow.py +464 -0
  10. pytcl/assignment_algorithms/network_simplex.py +167 -0
  11. pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
  12. pytcl/astronomical/__init__.py +104 -3
  13. pytcl/astronomical/ephemerides.py +14 -11
  14. pytcl/astronomical/reference_frames.py +865 -56
  15. pytcl/astronomical/relativity.py +6 -5
  16. pytcl/astronomical/sgp4.py +710 -0
  17. pytcl/astronomical/special_orbits.py +532 -0
  18. pytcl/astronomical/tle.py +558 -0
  19. pytcl/atmosphere/__init__.py +43 -1
  20. pytcl/atmosphere/ionosphere.py +512 -0
  21. pytcl/atmosphere/nrlmsise00.py +809 -0
  22. pytcl/clustering/dbscan.py +2 -2
  23. pytcl/clustering/gaussian_mixture.py +3 -3
  24. pytcl/clustering/hierarchical.py +15 -15
  25. pytcl/clustering/kmeans.py +4 -4
  26. pytcl/containers/__init__.py +24 -0
  27. pytcl/containers/base.py +219 -0
  28. pytcl/containers/cluster_set.py +12 -2
  29. pytcl/containers/covertree.py +26 -29
  30. pytcl/containers/kd_tree.py +94 -29
  31. pytcl/containers/rtree.py +200 -1
  32. pytcl/containers/vptree.py +21 -28
  33. pytcl/coordinate_systems/conversions/geodetic.py +272 -5
  34. pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
  35. pytcl/coordinate_systems/projections/__init__.py +1 -1
  36. pytcl/coordinate_systems/projections/projections.py +2 -2
  37. pytcl/coordinate_systems/rotations/rotations.py +10 -6
  38. pytcl/core/__init__.py +18 -0
  39. pytcl/core/validation.py +333 -2
  40. pytcl/dynamic_estimation/__init__.py +26 -0
  41. pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
  42. pytcl/dynamic_estimation/imm.py +14 -14
  43. pytcl/dynamic_estimation/kalman/__init__.py +30 -0
  44. pytcl/dynamic_estimation/kalman/constrained.py +382 -0
  45. pytcl/dynamic_estimation/kalman/extended.py +8 -8
  46. pytcl/dynamic_estimation/kalman/h_infinity.py +613 -0
  47. pytcl/dynamic_estimation/kalman/square_root.py +60 -573
  48. pytcl/dynamic_estimation/kalman/sr_ukf.py +302 -0
  49. pytcl/dynamic_estimation/kalman/ud_filter.py +410 -0
  50. pytcl/dynamic_estimation/kalman/unscented.py +8 -6
  51. pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
  52. pytcl/dynamic_estimation/rbpf.py +589 -0
  53. pytcl/gravity/egm.py +13 -0
  54. pytcl/gravity/spherical_harmonics.py +98 -37
  55. pytcl/gravity/tides.py +6 -6
  56. pytcl/logging_config.py +328 -0
  57. pytcl/magnetism/__init__.py +7 -0
  58. pytcl/magnetism/emm.py +10 -3
  59. pytcl/magnetism/wmm.py +260 -23
  60. pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
  61. pytcl/mathematical_functions/geometry/geometry.py +5 -5
  62. pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
  63. pytcl/mathematical_functions/signal_processing/detection.py +24 -24
  64. pytcl/mathematical_functions/signal_processing/filters.py +14 -14
  65. pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
  66. pytcl/mathematical_functions/special_functions/bessel.py +15 -3
  67. pytcl/mathematical_functions/special_functions/debye.py +136 -26
  68. pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
  69. pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
  70. pytcl/mathematical_functions/special_functions/hypergeometric.py +81 -15
  71. pytcl/mathematical_functions/transforms/fourier.py +8 -8
  72. pytcl/mathematical_functions/transforms/stft.py +12 -12
  73. pytcl/mathematical_functions/transforms/wavelets.py +9 -9
  74. pytcl/navigation/geodesy.py +246 -160
  75. pytcl/navigation/great_circle.py +101 -19
  76. pytcl/plotting/coordinates.py +7 -7
  77. pytcl/plotting/tracks.py +2 -2
  78. pytcl/static_estimation/maximum_likelihood.py +16 -14
  79. pytcl/static_estimation/robust.py +5 -5
  80. pytcl/terrain/loaders.py +5 -5
  81. pytcl/trackers/hypothesis.py +1 -1
  82. pytcl/trackers/mht.py +9 -9
  83. pytcl/trackers/multi_target.py +1 -1
  84. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/LICENSE +0 -0
  85. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/WHEEL +0 -0
  86. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/top_level.txt +0 -0
pytcl/assignment_algorithms/gating.py
@@ -5,7 +5,7 @@ This module provides gating methods to determine which measurements
 fall within a validation region around predicted track states.
 """
 
-from typing import List, Tuple
+from typing import Any, List, Tuple
 
 import numpy as np
 from numba import njit
@@ -15,8 +15,8 @@ from scipy.stats import chi2
 
 @njit(cache=True, fastmath=True)
 def _mahalanobis_distance_2d(
-    innovation: np.ndarray,
-    S_inv: np.ndarray,
+    innovation: np.ndarray[Any, Any],
+    S_inv: np.ndarray[Any, Any],
 ) -> float:
     """JIT-compiled Mahalanobis distance for 2D innovations."""
     return innovation[0] * (
@@ -26,8 +26,8 @@ def _mahalanobis_distance_2d(
 
 @njit(cache=True, fastmath=True)
 def _mahalanobis_distance_3d(
-    innovation: np.ndarray,
-    S_inv: np.ndarray,
+    innovation: np.ndarray[Any, Any],
+    S_inv: np.ndarray[Any, Any],
 ) -> float:
     """JIT-compiled Mahalanobis distance for 3D innovations."""
     result = 0.0
@@ -39,8 +39,8 @@ def _mahalanobis_distance_3d(
 
 @njit(cache=True, fastmath=True)
 def _mahalanobis_distance_general(
-    innovation: np.ndarray,
-    S_inv: np.ndarray,
+    innovation: np.ndarray[Any, Any],
+    S_inv: np.ndarray[Any, Any],
 ) -> float:
     """JIT-compiled Mahalanobis distance for general dimension."""
     n = len(innovation)
@@ -341,9 +341,9 @@ def compute_gate_volume(
 
 @njit(cache=True, fastmath=True, parallel=False)
 def mahalanobis_batch(
-    innovations: np.ndarray,
-    S_inv: np.ndarray,
-    output: np.ndarray,
+    innovations: np.ndarray[Any, Any],
+    S_inv: np.ndarray[Any, Any],
+    output: np.ndarray[Any, Any],
 ) -> None:
     """
     Compute Mahalanobis distances for a batch of innovations.
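
For orientation between hunks: mahalanobis_batch fills a preallocated output array with squared Mahalanobis distances d2 = nu.T @ S_inv @ nu, which gating then compares against a chi-square threshold. Below is a minimal plain-numpy sketch of the same computation; the helper name and gate probability are our own, and it does not call the package's numba-compiled kernels.

import numpy as np
from scipy.stats import chi2

def mahalanobis_batch_reference(innovations, S_inv):
    # d2[k] = innovation_k^T @ S_inv @ innovation_k for every row k
    return np.einsum("ki,ij,kj->k", innovations, S_inv, innovations)

rng = np.random.default_rng(0)
S = np.array([[2.0, 0.3], [0.3, 1.0]])   # innovation covariance
nu = rng.multivariate_normal([0.0, 0.0], S, size=200)
d2 = mahalanobis_batch_reference(nu, np.linalg.inv(S))
gate = chi2.ppf(0.99, df=2)              # 99% gate for 2-D measurements
print(f"{np.count_nonzero(d2 <= gate)}/{len(nu)} innovations pass the gate")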
pytcl/assignment_algorithms/jpda.py
@@ -9,7 +9,7 @@ This is more sophisticated than GNN which makes hard assignment decisions,
 as JPDA can handle measurement origin uncertainty in cluttered environments.
 """
 
-from typing import List, NamedTuple, Optional, Tuple
+from typing import Any, List, NamedTuple, Optional
 
 import numpy as np
 from numba import njit
@@ -24,7 +24,7 @@ class JPDAResult(NamedTuple):
 
     Attributes
     ----------
-    association_probs : ndarray
+    association_probs : ndarray[Any]
        Association probability matrix of shape (n_tracks, n_measurements + 1).
        association_probs[i, j] is the probability that track i is associated
        with measurement j. The last column (j = n_measurements) represents
@@ -32,9 +32,9 @@ class JPDAResult(NamedTuple):
     marginal_probs : list of ndarray
        List of marginal association probabilities for each track.
        marginal_probs[i][j] = P(measurement j originated from track i).
-    likelihood_matrix : ndarray
+    likelihood_matrix : ndarray[Any]
        Measurement likelihood matrix of shape (n_tracks, n_measurements).
-    gated : ndarray
+    gated : ndarray[Any]
        Boolean matrix indicating which track-measurement pairs passed gating.
     """
 
@@ -53,7 +53,7 @@ class JPDAUpdate(NamedTuple):
        Updated state estimates for each track.
     covariances : list of ndarray
        Updated covariances for each track (includes spread of means).
-    association_probs : ndarray
+    association_probs : ndarray[Any]
        Association probability matrix.
     innovations : list of ndarray
        Combined weighted innovations for each track.
@@ -66,8 +66,8 @@ class JPDAUpdate(NamedTuple):
 
 
 def compute_measurement_likelihood(
-    innovation: NDArray,
-    innovation_cov: NDArray,
+    innovation: NDArray[Any],
+    innovation_cov: NDArray[Any],
     detection_prob: float = 1.0,
 ) -> float:
     """
@@ -75,9 +75,9 @@ def compute_measurement_likelihood(
 
     Parameters
     ----------
-    innovation : ndarray
+    innovation : ndarray[Any]
        Measurement innovation (residual), shape (m,).
-    innovation_cov : ndarray
+    innovation_cov : ndarray[Any]
        Innovation covariance, shape (m, m).
     detection_prob : float
        Probability of detection (Pd).
@@ -102,14 +102,14 @@
 
 
 def compute_likelihood_matrix(
-    track_states: List[NDArray],
-    track_covariances: List[NDArray],
-    measurements: NDArray,
-    H: NDArray,
-    R: NDArray,
+    track_states: list[NDArray[Any]],
+    track_covariances: list[NDArray[Any]],
+    measurements: NDArray[Any],
+    H: NDArray[Any],
+    R: NDArray[Any],
     detection_prob: float = 1.0,
     gate_threshold: Optional[float] = None,
-) -> Tuple[NDArray, NDArray]:
+) -> tuple[NDArray[Any], NDArray[Any]]:
     """
     Compute likelihood matrix for all track-measurement pairs.
 
@@ -119,11 +119,11 @@ def compute_likelihood_matrix(
        State estimates for each track.
     track_covariances : list of ndarray
        Covariances for each track.
-    measurements : ndarray
+    measurements : ndarray[Any]
        Measurements, shape (n_meas, m).
-    H : ndarray
+    H : ndarray[Any]
        Measurement matrix, shape (m, n).
-    R : ndarray
+    R : ndarray[Any]
        Measurement noise covariance, shape (m, m).
     detection_prob : float
        Probability of detection.
@@ -132,9 +132,9 @@
 
     Returns
     -------
-    likelihood_matrix : ndarray
+    likelihood_matrix : ndarray[Any]
        Likelihood values, shape (n_tracks, n_meas).
-    gated : ndarray
+    gated : ndarray[Any]
        Boolean gating matrix, shape (n_tracks, n_meas).
     """
     n_tracks = len(track_states)
@@ -163,11 +163,11 @@ def compute_likelihood_matrix(
 
 
 def jpda_probabilities(
-    likelihood_matrix: NDArray,
-    gated: NDArray,
+    likelihood_matrix: NDArray[Any],
+    gated: NDArray[Any],
     detection_prob: float = 1.0,
     clutter_density: float = 1e-6,
-) -> NDArray:
+) -> NDArray[Any]:
     """
     Compute JPDA association probabilities.
 
@@ -176,9 +176,9 @@
 
     Parameters
     ----------
-    likelihood_matrix : ndarray
+    likelihood_matrix : ndarray[Any]
        Likelihood values, shape (n_tracks, n_meas).
-    gated : ndarray
+    gated : ndarray[Any]
        Boolean gating matrix, shape (n_tracks, n_meas).
     detection_prob : float
        Probability of detection (Pd).
@@ -187,7 +187,7 @@
 
     Returns
     -------
-    beta : ndarray
+    beta : ndarray[Any]
        Association probability matrix, shape (n_tracks, n_meas + 1).
        beta[i, j] = P(measurement j is from track i) for j < n_meas.
        beta[i, n_meas] = P(track i has no measurement).
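
The two public entry points touched above compose into a small pipeline: build the gated likelihood matrix, then turn it into association probabilities. A usage sketch follows, assuming compute_likelihood_matrix and jpda_probabilities import from pytcl.assignment_algorithms.jpda as the file list indicates; the tracks and measurements here are invented for illustration.

import numpy as np
from pytcl.assignment_algorithms.jpda import (
    compute_likelihood_matrix,
    jpda_probabilities,
)

# Two position-only tracks and three measurements (the third is clutter-like).
track_states = [np.array([0.0, 0.0]), np.array([10.0, 0.0])]
track_covariances = [4.0 * np.eye(2), 4.0 * np.eye(2)]
measurements = np.array([[0.4, -0.3], [9.6, 0.2], [30.0, 30.0]])
H = np.eye(2)   # measurements observe position directly
R = np.eye(2)

likelihood, gated = compute_likelihood_matrix(
    track_states, track_covariances, measurements, H, R,
    detection_prob=0.9, gate_threshold=9.21,   # ~99% chi-square gate, 2 dof
)
beta = jpda_probabilities(
    likelihood, gated, detection_prob=0.9, clutter_density=1e-4
)
# Per the docstring, each row of beta should sum to 1; the last column is
# the missed-detection probability for that track.
print(beta.round(3))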
@@ -218,11 +218,11 @@
 
 
 def _jpda_exact(
-    likelihood_matrix: NDArray,
-    gated: NDArray,
+    likelihood_matrix: NDArray[Any],
+    gated: NDArray[Any],
     detection_prob: float,
     clutter_density: float,
-) -> NDArray:
+) -> NDArray[Any]:
     """
     Exact JPDA computation via hypothesis enumeration.
 
@@ -241,8 +241,8 @@
     def generate_hypotheses(
         meas_idx: int,
         current_assignment: List[int],
-        used_tracks: set,
-    ):
+        used_tracks: set[Any],
+    ) -> Any:
         """Recursively generate valid hypotheses."""
         if meas_idx == n_meas:
             yield current_assignment.copy()
@@ -268,11 +268,11 @@ def _jpda_exact(
     hypothesis_probs = []
     hypothesis_assignments = []
 
-    for assignment in generate_hypotheses(0, [], set()):
+    for assignment in generate_hypotheses(0, [], set[Any]()):
         # Compute probability of this hypothesis
         prob = 1.0
 
-        detected_tracks = set()
+        detected_tracks = set[Any]()
         for j, track_idx in enumerate(assignment):
             if track_idx == -1:
                 # Measurement j is clutter
@@ -301,7 +301,7 @@ def _jpda_exact(
     for h_idx, (assignment, prob) in enumerate(
         zip(hypothesis_assignments, hypothesis_probs)
     ):
-        detected_tracks = set()
+        detected_tracks = set[Any]()
         for j, track_idx in enumerate(assignment):
             if track_idx >= 0:
                 beta[track_idx, j] += prob
@@ -317,11 +317,11 @@
 
 @njit(cache=True)
 def _jpda_approximate_core(
-    likelihood_matrix: np.ndarray,
-    gated: np.ndarray,
+    likelihood_matrix: np.ndarray[Any, Any],
+    gated: np.ndarray[Any, Any],
     detection_prob: float,
     clutter_density: float,
-) -> np.ndarray:
+) -> np.ndarray[Any, Any]:
     """JIT-compiled core of approximate JPDA computation."""
     n_tracks = likelihood_matrix.shape[0]
     n_meas = likelihood_matrix.shape[1]
@@ -369,11 +369,11 @@
 
 
 def _jpda_approximate(
-    likelihood_matrix: NDArray,
-    gated: NDArray,
+    likelihood_matrix: NDArray[Any],
+    gated: NDArray[Any],
     detection_prob: float,
     clutter_density: float,
-) -> NDArray:
+) -> NDArray[Any]:
     """
     Approximate JPDA using parametric approach.
 
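The approximate path avoids exact hypothesis enumeration by weighting each track's associations independently, in the spirit of single-target PDA. A generic sketch of that per-row weighting follows, assuming unity gate probability; the package's JIT-compiled core may differ in detail.

import numpy as np

def pda_beta_row(likelihoods, gated, detection_prob=0.9, clutter_density=1e-4):
    # Standard PDA weights for one track: a term per gated measurement
    # (proportional to Pd * likelihood) plus a missed-detection term
    # (proportional to (1 - Pd) * clutter density), normalized to sum to 1.
    terms = detection_prob * np.where(gated, likelihoods, 0.0)
    miss = (1.0 - detection_prob) * clutter_density
    row = np.append(terms, miss)
    return row / row.sum()

print(pda_beta_row(np.array([0.3, 0.05, 0.0]), np.array([True, True, False])))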
pytcl/assignment_algorithms/nd_assignment.py (new file)
@@ -0,0 +1,379 @@
+"""
+N-dimensional assignment algorithms (4D and higher).
+
+This module extends the 3D assignment solver to arbitrary dimensions,
+enabling more complex assignment scenarios such as:
+- 4D: Measurements × Tracks × Hypotheses × Sensors
+- 5D+: Additional dimensions for time frames, maneuver classes, etc.
+
+The module provides a unified interface for solving high-dimensional
+assignment problems using generalized relaxation methods.
+
+References
+----------
+.. [1] Poore, A. B., "Multidimensional Assignment Problem and Data
+   Association," IEEE Transactions on Aerospace and Electronic Systems,
+   2013.
+.. [2] Cramer, R. D., et al., "The Emerging Role of Chemical Similarity in
+   Drug Discovery," Perspectives in Drug Discovery and Design, 2003.
+"""
+
+from typing import NamedTuple, Optional, Tuple
+
+import numpy as np
+from numpy.typing import NDArray
+
+
+class AssignmentNDResult(NamedTuple):
+    """Result of an N-dimensional assignment problem.
+
+    Attributes
+    ----------
+    assignments : ndarray
+        Array of shape (n_assignments, n_dimensions) containing assigned
+        index tuples. Each row is an n-tuple of indices.
+    cost : float
+        Total assignment cost.
+    converged : bool
+        Whether the algorithm converged (for iterative methods).
+    n_iterations : int
+        Number of iterations used (for iterative methods).
+    gap : float
+        Optimality gap (upper_bound - lower_bound) for relaxation methods.
+    """
+
+    assignments: NDArray[np.intp]
+    cost: float
+    converged: bool
+    n_iterations: int
+    gap: float
+
+
+def validate_cost_tensor(cost_tensor: NDArray[np.float64]) -> Tuple[int, ...]:
+    """
+    Validate cost tensor and return dimensions.
+
+    Parameters
+    ----------
+    cost_tensor : ndarray
+        Cost tensor of arbitrary dimension.
+
+    Returns
+    -------
+    dims : tuple
+        Dimensions of the cost tensor.
+
+    Raises
+    ------
+    ValueError
+        If tensor has fewer than 2 dimensions.
+    """
+    if cost_tensor.ndim < 2:
+        raise ValueError(
+            f"Cost tensor must have at least 2 dimensions, got {cost_tensor.ndim}"
+        )
+
+    return cost_tensor.shape
+
+
+def greedy_assignment_nd(
+    cost_tensor: NDArray[np.float64],
+    max_assignments: Optional[int] = None,
+) -> AssignmentNDResult:
+    """
+    Greedy solver for N-dimensional assignment.
+
+    Selects minimum-cost tuples in order until no more valid assignments
+    exist (no dimension index is repeated).
+
+    Parameters
+    ----------
+    cost_tensor : ndarray
+        Cost tensor of shape (n1, n2, ..., nk).
+    max_assignments : int, optional
+        Maximum number of assignments to find (default: min(dimensions)).
+
+    Returns
+    -------
+    AssignmentNDResult
+        Assignments, total cost, and algorithm info.
+
+    Notes
+    -----
+    Greedy assignment is fast O(n log n) but not optimal. Used as
+    heuristic or starting solution for optimization methods.
+    """
+    dims = cost_tensor.shape
+    n_dims = len(dims)
+
+    if max_assignments is None:
+        max_assignments = min(dims)
+
+    # Flatten tensor with index mapping
+    flat_costs = cost_tensor.ravel()
+    sorted_indices = np.argsort(flat_costs)
+
+    assignments: list[tuple[int, ...]] = []
+    used_indices: list[set[int]] = [set() for _ in range(n_dims)]
+
+    for flat_idx in sorted_indices:
+        if len(assignments) >= max_assignments:
+            break
+
+        # Convert flat index to multi-dimensional index
+        multi_idx = np.unravel_index(flat_idx, dims)
+
+        # Check if any dimension index is already used
+        conflict = False
+        for d, idx in enumerate(multi_idx):
+            if idx in used_indices[d]:
+                conflict = True
+                break
+
+        if not conflict:
+            assignments.append(multi_idx)
+            for d, idx in enumerate(multi_idx):
+                used_indices[d].add(idx)
+
+    assignments_array = np.array(assignments, dtype=np.intp)
+    if assignments_array.size > 0:
+        total_cost = float(np.sum(cost_tensor[tuple(assignments_array.T)]))
+    else:
+        total_cost = 0.0
+
+    return AssignmentNDResult(
+        assignments=assignments_array,
+        cost=total_cost,
+        converged=True,
+        n_iterations=1,
+        gap=0.0,  # Greedy doesn't compute lower bound
+    )
+
+
+def relaxation_assignment_nd(
+    cost_tensor: NDArray[np.float64],
+    max_iterations: int = 100,
+    tolerance: float = 1e-6,
+    verbose: bool = False,
+) -> AssignmentNDResult:
+    """
+    Lagrangian relaxation solver for N-dimensional assignment.
+
+    Uses iterative subgradient optimization on Lagrange multipliers
+    to tighten the lower bound and find good solutions.
+
+    Parameters
+    ----------
+    cost_tensor : ndarray
+        Cost tensor of shape (n1, n2, ..., nk).
+    max_iterations : int, optional
+        Maximum iterations (default 100).
+    tolerance : float, optional
+        Convergence tolerance for gap (default 1e-6).
+    verbose : bool, optional
+        Print iteration info (default False).
+
+    Returns
+    -------
+    AssignmentNDResult
+        Assignments, total cost, convergence info, and optimality gap.
+
+    Notes
+    -----
+    The relaxation approach:
+    1. Maintain Lagrange multipliers for each dimension
+    2. Solve relaxed problem (select best entries per tuple)
+    3. Update multipliers based on constraint violations
+    4. Iterate until convergence or gap tolerance met
+
+    This guarantees a lower bound on optimal cost and often finds
+    near-optimal or optimal solutions.
+    """
+    dims = cost_tensor.shape
+    n_dims = len(dims)
+
+    # Initialize Lagrange multipliers (one per dimension per index)
+    lambdas = [np.zeros(dim) for dim in dims]
+
+    best_cost = np.inf
+    best_assignments = None
+    lower_bound = -np.inf
+
+    for iteration in range(max_iterations):
+        # Compute relaxed costs: original - Lagrange penalty
+        relaxed_cost = cost_tensor.copy()
+        for d in range(n_dims):
+            # Reshape lambda[d] to broadcast correctly
+            shape = [1] * n_dims
+            shape[d] = dims[d]
+            relaxed_cost = relaxed_cost - lambdas[d].reshape(shape)
+
+        # Solve relaxed problem: greedy on relaxed costs
+        result_relaxed = greedy_assignment_nd(relaxed_cost)
+
+        # Compute lower bound from relaxed solution
+        lower_bound = result_relaxed.cost + sum(
+            np.sum(lambdas[d]) for d in range(n_dims)
+        )
+
+        # Extract solution from relaxed problem
+        if len(result_relaxed.assignments) > 0:
+            actual_cost = float(
+                np.sum(cost_tensor[tuple(result_relaxed.assignments.T)])
+            )
+
+            if actual_cost < best_cost:
+                best_cost = actual_cost
+                best_assignments = result_relaxed.assignments
+
+        # Compute constraint violations and update multipliers
+        violations = [np.zeros(dim) for dim in dims]
+
+        for assignment in result_relaxed.assignments:
+            for d, idx in enumerate(assignment):
+                violations[d][idx] += 1
+
+        # Subgradient descent on multipliers
+        step_size = 1.0 / (iteration + 1)
+        for d in range(n_dims):
+            lambdas[d] -= step_size * (violations[d] - 1.0)
+
+        # Compute gap
+        gap = best_cost - lower_bound if best_cost != np.inf else np.inf
+
+        if verbose:
+            print(
+                f"Iter {iteration+1}: LB={lower_bound:.4f}, UB={best_cost:.4f}, "
+                f"Gap={gap:.6f}"
+            )
+
+        if gap < tolerance:
+            if verbose:
+                print(f"Converged at iteration {iteration+1}")
+            break
+
+    if best_assignments is None:
+        best_assignments = np.empty((0, n_dims), dtype=np.intp)
+        best_cost = 0.0
+
+    gap = best_cost - lower_bound if best_cost != np.inf else np.inf
+
+    return AssignmentNDResult(
+        assignments=best_assignments,
+        cost=best_cost,
+        converged=gap < tolerance,
+        n_iterations=iteration + 1,
+        gap=gap,
+    )
+
+
+def auction_assignment_nd(
+    cost_tensor: NDArray[np.float64],
+    max_iterations: int = 100,
+    epsilon: float = 0.01,
+    verbose: bool = False,
+) -> AssignmentNDResult:
+    """
+    Auction algorithm for N-dimensional assignment.
+
+    Inspired by the classical auction algorithm for 2D assignment,
+    adapted to higher dimensions. Objects bid for assignments based
+    on relative costs.
+
+    Parameters
+    ----------
+    cost_tensor : ndarray
+        Cost tensor of shape (n1, n2, ..., nk).
+    max_iterations : int, optional
+        Maximum iterations (default 100).
+    epsilon : float, optional
+        Bid increment (default 0.01). Larger epsilon → fewer iterations,
+        worse solution; smaller epsilon → more iterations, better solution.
+    verbose : bool, optional
+        Print iteration info (default False).
+
+    Returns
+    -------
+    AssignmentNDResult
+        Assignments, total cost, convergence info, gap estimate.
+
+    Notes
+    -----
+    The algorithm maintains a "price" for each index and allows bidding
+    (price adjustment) to maximize value. Converges to epsilon-optimal
+    solution in finite iterations.
+    """
+    dims = cost_tensor.shape
+    n_dims = len(dims)
+
+    # Initialize prices (one per dimension per index)
+    prices = [np.zeros(dim) for dim in dims]
+
+    for iteration in range(max_iterations):
+        # Compute profit: cost - price penalty
+        profit = cost_tensor.copy()
+        for d in range(n_dims):
+            shape = [1] * n_dims
+            shape[d] = dims[d]
+            profit = profit - prices[d].reshape(shape)
+
+        # Find best assignment at current prices (greedy)
+        result = greedy_assignment_nd(profit)
+
+        if len(result.assignments) == 0:
+            break
+
+        # Update prices: increase price for "in-demand" indices
+        demands = [np.zeros(dim) for dim in dims]
+        for assignment in result.assignments:
+            for d, idx in enumerate(assignment):
+                demands[d][idx] += 1
+
+        for d in range(n_dims):
+            prices[d] += epsilon * (demands[d] - 1.0)
+
+        if verbose and (iteration + 1) % 10 == 0:
+            actual_cost = float(np.sum(cost_tensor[tuple(result.assignments.T)]))
+            print(f"Iter {iteration+1}: Cost={actual_cost:.4f}")
+
+    # Final solution
+    result = greedy_assignment_nd(cost_tensor)
+
+    return AssignmentNDResult(
+        assignments=result.assignments,
+        cost=result.cost,
+        converged=True,
+        n_iterations=iteration + 1,
+        gap=0.0,  # Auction algorithm doesn't track gap formally
+    )
+
+
+def detect_dimension_conflicts(
+    assignments: NDArray[np.intp],
+    dims: Tuple[int, ...],
+) -> bool:
+    """
+    Check if assignments violate dimension uniqueness.
+
+    For valid assignment, each index should appear at most once per dimension.
+
+    Parameters
+    ----------
+    assignments : ndarray
+        Array of shape (n_assignments, n_dimensions) with assignments.
+    dims : tuple
+        Dimensions of the cost tensor.
+
+    Returns
+    -------
+    has_conflicts : bool
+        True if any index appears more than once in any dimension.
+    """
+    n_dims = len(dims)
+
+    for d in range(n_dims):
+        indices_in_dim = assignments[:, d]
+        if len(indices_in_dim) != len(np.unique(indices_in_dim)):
+            return True
+
+    return False
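
A closing usage sketch for the new module, assuming the functions import from pytcl.assignment_algorithms.nd_assignment as the file list indicates; the cost tensor is invented for illustration. Greedy gives a fast upper bound, while the relaxation solver also reports an optimality gap.

import numpy as np
from pytcl.assignment_algorithms.nd_assignment import (
    detect_dimension_conflicts,
    greedy_assignment_nd,
    relaxation_assignment_nd,
)

rng = np.random.default_rng(42)
# 4-D problem: measurements x tracks x hypotheses x sensors
cost = rng.uniform(0.0, 10.0, size=(6, 6, 4, 3))

fast = greedy_assignment_nd(cost)   # fast heuristic upper bound
tight = relaxation_assignment_nd(cost, max_iterations=200, tolerance=1e-4)

# Each row of .assignments is one (meas, track, hyp, sensor) index tuple,
# with no index reused within a dimension.
assert not detect_dimension_conflicts(fast.assignments, cost.shape)
print(f"greedy cost: {fast.cost:.3f}")
print(f"relaxation cost: {tight.cost:.3f}  gap: {tight.gap:.4f}  "
      f"converged: {tight.converged}")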