nrl-tracker 1.9.1-py3-none-any.whl → 1.9.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.9.2.dist-info}/METADATA +4 -4
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.9.2.dist-info}/RECORD +60 -59
- pytcl/__init__.py +2 -2
- pytcl/assignment_algorithms/gating.py +18 -0
- pytcl/assignment_algorithms/jpda.py +56 -0
- pytcl/assignment_algorithms/nd_assignment.py +65 -0
- pytcl/assignment_algorithms/network_flow.py +40 -0
- pytcl/astronomical/ephemerides.py +18 -0
- pytcl/astronomical/orbital_mechanics.py +131 -0
- pytcl/atmosphere/ionosphere.py +44 -0
- pytcl/atmosphere/models.py +29 -0
- pytcl/clustering/dbscan.py +9 -0
- pytcl/clustering/gaussian_mixture.py +20 -0
- pytcl/clustering/hierarchical.py +29 -0
- pytcl/clustering/kmeans.py +9 -0
- pytcl/coordinate_systems/conversions/geodetic.py +46 -0
- pytcl/coordinate_systems/conversions/spherical.py +35 -0
- pytcl/coordinate_systems/rotations/rotations.py +147 -0
- pytcl/core/__init__.py +16 -0
- pytcl/core/maturity.py +346 -0
- pytcl/dynamic_estimation/gaussian_sum_filter.py +55 -0
- pytcl/dynamic_estimation/imm.py +29 -0
- pytcl/dynamic_estimation/information_filter.py +64 -0
- pytcl/dynamic_estimation/kalman/extended.py +56 -0
- pytcl/dynamic_estimation/kalman/linear.py +69 -0
- pytcl/dynamic_estimation/kalman/unscented.py +81 -0
- pytcl/dynamic_estimation/particle_filters/bootstrap.py +146 -0
- pytcl/dynamic_estimation/rbpf.py +51 -0
- pytcl/dynamic_estimation/smoothers.py +58 -0
- pytcl/dynamic_models/continuous_time/dynamics.py +104 -0
- pytcl/dynamic_models/discrete_time/coordinated_turn.py +6 -0
- pytcl/dynamic_models/discrete_time/singer.py +12 -0
- pytcl/dynamic_models/process_noise/coordinated_turn.py +46 -0
- pytcl/dynamic_models/process_noise/polynomial.py +6 -0
- pytcl/dynamic_models/process_noise/singer.py +52 -0
- pytcl/gravity/clenshaw.py +60 -0
- pytcl/gravity/egm.py +47 -0
- pytcl/gravity/models.py +34 -0
- pytcl/gravity/spherical_harmonics.py +73 -0
- pytcl/gravity/tides.py +34 -0
- pytcl/mathematical_functions/numerical_integration/quadrature.py +85 -0
- pytcl/mathematical_functions/special_functions/bessel.py +55 -0
- pytcl/mathematical_functions/special_functions/elliptic.py +42 -0
- pytcl/mathematical_functions/special_functions/error_functions.py +49 -0
- pytcl/mathematical_functions/special_functions/gamma_functions.py +43 -0
- pytcl/mathematical_functions/special_functions/lambert_w.py +5 -0
- pytcl/mathematical_functions/special_functions/marcum_q.py +16 -0
- pytcl/navigation/geodesy.py +101 -2
- pytcl/navigation/great_circle.py +71 -0
- pytcl/navigation/rhumb.py +74 -0
- pytcl/performance_evaluation/estimation_metrics.py +70 -0
- pytcl/performance_evaluation/track_metrics.py +30 -0
- pytcl/static_estimation/maximum_likelihood.py +54 -0
- pytcl/static_estimation/robust.py +57 -0
- pytcl/terrain/dem.py +69 -0
- pytcl/terrain/visibility.py +65 -0
- pytcl/trackers/hypothesis.py +65 -0
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.9.2.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.9.2.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.9.2.dist-info}/top_level.txt +0 -0
pytcl/performance_evaluation/track_metrics.py
CHANGED

@@ -192,6 +192,17 @@ def ospa_over_time(
     ------
     ValueError
         If sequences have different lengths.
+
+    Examples
+    --------
+    >>> # Two time steps with ground truth and estimates
+    >>> X_seq = [[np.array([0, 0]), np.array([10, 10])],
+    ...          [np.array([1, 0]), np.array([11, 10])]]
+    >>> Y_seq = [[np.array([0.5, 0]), np.array([10, 10.5])],
+    ...          [np.array([1.5, 0]), np.array([11, 10.5])]]
+    >>> ospa_vals = ospa_over_time(X_seq, Y_seq, c=100, p=2)
+    >>> len(ospa_vals)
+    2
     """
     if len(X_sequence) != len(Y_sequence):
         raise ValueError("Sequences must have the same length")
@@ -340,6 +351,13 @@ def identity_switches(
     -------
     int
         Number of identity switches.
+
+    Examples
+    --------
+    >>> true_labels = np.array([0, 0, 1, 1])
+    >>> estimated_labels = np.array([0, 0, 0, 0])  # Track 0 switches targets
+    >>> identity_switches(true_labels, estimated_labels)
+    1
     """
     true_labels = np.asarray(true_labels)
     estimated_labels = np.asarray(estimated_labels)
@@ -391,6 +409,18 @@ def mot_metrics(
     MOTA (Multiple Object Tracking Accuracy) accounts for false positives,
     misses, and identity switches. MOTP (Precision) measures localization
     accuracy for correctly matched pairs.
+
+    Examples
+    --------
+    >>> gt = [[np.array([0, 0]), np.array([10, 10])],
+    ...       [np.array([1, 0]), np.array([11, 10])]]
+    >>> est = [[np.array([0.5, 0]), np.array([10.5, 10])],
+    ...        [np.array([1.5, 0]), np.array([11.5, 10])]]
+    >>> result = mot_metrics(gt, est, threshold=5.0)
+    >>> result.mota  # High accuracy with small errors
+    1.0
+    >>> result.motp < 1.0  # Some localization error
+    True
     """
     total_gt = 0
     total_fp = 0
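The Notes above define MOTA and MOTP only in prose. As a quick orientation, here is a minimal standalone sketch of the standard formulas; the tallies below are made-up numbers, not output of the package:

import numpy as np

# Multi-object tracking metrics from hypothetical per-scan tallies.
total_gt, total_fp, total_miss, total_idsw = 4, 0, 0, 0  # GT objects, FPs, misses, ID switches
match_distances = np.array([0.5, 0.5, 0.5, 0.5])         # distances of matched pairs

mota = 1.0 - (total_fp + total_miss + total_idsw) / total_gt
motp = float(match_distances.mean())
print(mota, motp)  # 1.0 0.5 -- perfect accuracy, 0.5 mean localization error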
pytcl/static_estimation/maximum_likelihood.py
CHANGED

@@ -222,6 +222,14 @@ def fisher_information_exponential_family(
     -----
     For exponential families, I(theta) = Var[T(X)] = d^2 A(theta) / d theta^2,
     where A(theta) is the log-partition function.
+
+    Examples
+    --------
+    >>> def suff_stats(x, theta):
+    ...     return np.array([x, x**2])  # Mean and second moment
+    >>> data = np.random.normal(0, 1, 100)
+    >>> theta = np.array([0.0, 1.0])
+    >>> F = fisher_information_exponential_family(suff_stats, theta, data)
     """
     theta = np.asarray(theta, dtype=np.float64)
     data = np.asarray(data, dtype=np.float64)
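The identity I(theta) = Var[T(X)] in the Notes can be sanity-checked by Monte Carlo. A minimal sketch for the Gaussian mean with unit variance, where T(x) = x and the textbook Fisher information is 1 (standalone, not calling the package):

import numpy as np

# Monte Carlo check of I(theta) = Var[T(X)] for N(mu, 1), T(x) = x.
rng = np.random.default_rng(0)
samples = rng.normal(0.0, 1.0, 200_000)
print(np.var(samples))  # ~1.0, matching the known Fisher information for the mean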
@@ -263,6 +271,14 @@ def observed_fisher_information(
     The observed Fisher information is often more accurate for
     finite samples and is asymptotically equivalent to the
     expected Fisher information.
+
+    Examples
+    --------
+    >>> data = np.array([1.2, 0.8, 1.1, 0.9, 1.0])
+    >>> def log_lik(theta):
+    ...     return -0.5 * np.sum((data - theta[0])**2 / theta[1]) - 2.5 * np.log(theta[1])
+    >>> theta = np.array([1.0, 0.1])
+    >>> F_obs = observed_fisher_information(log_lik, theta)
     """
     return fisher_information_numerical(log_likelihood, theta, h)
@@ -361,6 +377,14 @@ def cramer_rao_bound_biased(
     -----
     For a biased estimator with bias b(theta), the CRB becomes:
     Var(theta_hat) >= (I + db/dtheta) I^{-1} (I + db/dtheta)^T
+
+    Examples
+    --------
+    >>> F = np.array([[10.0, 0], [0, 5.0]])  # Fisher info
+    >>> db = np.array([[0.1, 0], [0, 0.2]])  # Bias gradient
+    >>> crb_biased = cramer_rao_bound_biased(F, db)
+    >>> crb_biased.shape
+    (2, 2)
     """
     F = np.asarray(fisher_info, dtype=np.float64)
     db = np.asarray(bias_gradient, dtype=np.float64)
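The biased-CRB formula in the Notes is easy to evaluate by hand. A sketch with plain numpy, independent of the package function, using the same F and db as the doctest:

import numpy as np

F = np.array([[10.0, 0.0], [0.0, 5.0]])   # Fisher information
db = np.array([[0.1, 0.0], [0.0, 0.2]])   # bias gradient db/dtheta
I = np.eye(2)

# Var(theta_hat) >= (I + db) F^{-1} (I + db)^T, per the Notes above.
crb_biased = (I + db) @ np.linalg.inv(F) @ (I + db).T
print(np.diag(crb_biased))  # [0.121 0.288], vs. [0.1 0.2] for the unbiased bound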
@@ -571,6 +595,17 @@ def mle_scoring(
     -----
     Fisher scoring update: theta_{n+1} = theta_n + I(theta_n)^{-1} @ score
     This is equivalent to Newton-Raphson when I(theta) = -E[H].
+
+    Examples
+    --------
+    >>> data = np.array([1.0, 1.1, 0.9, 1.2, 0.8])
+    >>> def log_lik(theta):
+    ...     return -0.5 * len(data) * np.log(2*np.pi) - np.sum((data - theta[0])**2) / 2
+    >>> def score(theta):
+    ...     return np.array([np.sum(data - theta[0])])
+    >>> def fisher(theta):
+    ...     return np.array([[len(data)]])
+    >>> result = mle_scoring(log_lik, score, fisher, np.array([0.0]))
     """
     theta = np.asarray(theta_init, dtype=np.float64).copy()
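For the Gaussian-mean doctest above, the scoring update in the Notes can be traced by hand. A minimal sketch of a single iteration, standalone rather than calling mle_scoring:

import numpy as np

data = np.array([1.0, 1.1, 0.9, 1.2, 0.8])
theta = np.array([0.0])                      # initial guess for the mean

# One Fisher scoring step: theta_{n+1} = theta_n + I(theta_n)^{-1} @ score.
score = np.array([np.sum(data - theta[0])])  # gradient of the log-likelihood
fisher = np.array([[len(data)]])             # expected information (sigma = 1)
theta = theta + np.linalg.solve(fisher, score)
print(theta)  # [1.] -- the sample mean; the score is linear, so one step suffices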
@@ -729,6 +764,13 @@ def aic(log_likelihood: float, n_params: int) -> float:
     Notes
     -----
     AIC = -2 * log_likelihood + 2 * n_params
+
+    Examples
+    --------
+    >>> log_lik = -100.0
+    >>> n_params = 3
+    >>> aic(log_lik, n_params)
+    206.0
     """
     return -2 * log_likelihood + 2 * n_params
@@ -754,6 +796,12 @@ def bic(log_likelihood: float, n_params: int, n_samples: int) -> float:
     Notes
     -----
     BIC = -2 * log_likelihood + n_params * log(n_samples)
+
+    Examples
+    --------
+    >>> log_lik = -100.0
+    >>> bic(log_lik, n_params=3, n_samples=100)
+    213.81551055796427
     """
     return -2 * log_likelihood + n_params * np.log(n_samples)
@@ -780,6 +828,12 @@ def aicc(log_likelihood: float, n_params: int, n_samples: int) -> float:
     -----
     AICc adds a correction for small sample sizes:
     AICc = AIC + 2*k*(k+1)/(n-k-1)
+
+    Examples
+    --------
+    >>> log_lik = -50.0
+    >>> aicc(log_lik, n_params=3, n_samples=20)
+    107.5
     """
     k = n_params
     n = n_samples
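Taken together, the three criteria differ only in their penalty term. A hedged sketch re-implementing the Notes formulas (standalone, with made-up log-likelihoods) shows how the small-sample AICc penalty separates two otherwise close models:

import numpy as np

def aic(ll, k): return -2 * ll + 2 * k
def bic(ll, k, n): return -2 * ll + k * np.log(n)
def aicc(ll, k, n): return aic(ll, k) + 2 * k * (k + 1) / (n - k - 1)

# Two hypothetical models fit to the same n=20 samples: B buys a small
# likelihood gain with two extra parameters.
for name, ll, k in [("A", -52.0, 3), ("B", -50.5, 5)]:
    print(name, aic(ll, k), aicc(ll, k, 20), bic(ll, k, 20))
# AIC is nearly indifferent (110 vs 111), while AICc (111.5 vs ~115.3)
# and BIC both favor the simpler model A.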
pytcl/static_estimation/robust.py
CHANGED

@@ -99,6 +99,15 @@ def huber_weight(r: ArrayLike, c: float = 1.345) -> NDArray[np.floating]:
     The Huber weight function is:
     w(r) = 1 if |r| <= c
     w(r) = c / |r| if |r| > c
+
+    Examples
+    --------
+    >>> r = np.array([0.5, 1.0, 2.0, 5.0])  # Standardized residuals
+    >>> w = huber_weight(r, c=1.345)
+    >>> w[0]  # Small residual gets weight 1
+    1.0
+    >>> w[3] < 0.5  # Large residual gets reduced weight
+    True
     """
     r = np.asarray(r, dtype=np.float64)
     abs_r = np.abs(r)
@@ -129,6 +138,13 @@ def huber_rho(r: ArrayLike, c: float = 1.345) -> NDArray[np.floating]:
     The Huber rho function is:
     rho(r) = r^2 / 2 if |r| <= c
     rho(r) = c * |r| - c^2/2 if |r| > c
+
+    Examples
+    --------
+    >>> r = np.array([0.5, 1.0, 2.0])
+    >>> rho = huber_rho(r, c=1.345)
+    >>> rho[0]  # Small residual: r^2/2
+    0.125
     """
     r = np.asarray(r, dtype=np.float64)
     abs_r = np.abs(r)
@@ -160,6 +176,15 @@ def tukey_weight(r: ArrayLike, c: float = 4.685) -> NDArray[np.floating]:
     w(r) = 0 if |r| > c

     This provides complete rejection of large outliers.
+
+    Examples
+    --------
+    >>> r = np.array([0.5, 2.0, 5.0, 10.0])
+    >>> w = tukey_weight(r, c=4.685)
+    >>> w[0] > 0.9  # Small residual gets high weight
+    True
+    >>> w[3]  # Large residual completely rejected
+    0.0
     """
     r = np.asarray(r, dtype=np.float64)
     abs_r = np.abs(r)
@@ -190,6 +215,15 @@ def tukey_rho(r: ArrayLike, c: float = 4.685) -> NDArray[np.floating]:
     The Tukey rho function is:
     rho(r) = c^2/6 * (1 - (1 - (r/c)^2)^3) if |r| <= c
     rho(r) = c^2/6 if |r| > c
+
+    Examples
+    --------
+    >>> r = np.array([0.0, 2.0, 10.0])
+    >>> rho = tukey_rho(r, c=4.685)
+    >>> rho[0]  # Zero residual
+    0.0
+    >>> np.isclose(rho[2], 4.685**2 / 6)  # Large residuals saturate at c^2/6
+    True
     """
     r = np.asarray(r, dtype=np.float64)
     abs_r = np.abs(r)
@@ -219,6 +253,15 @@ def cauchy_weight(r: ArrayLike, c: float = 2.385) -> NDArray[np.floating]:
     -----
     The Cauchy weight function is:
     w(r) = 1 / (1 + (r/c)^2)
+
+    Examples
+    --------
+    >>> r = np.array([0.0, 1.0, 5.0])
+    >>> w = cauchy_weight(r, c=2.385)
+    >>> w[0]  # Zero residual gets weight 1
+    1.0
+    >>> 0 < w[2] < 1  # Large residuals get reduced weight (but never zero)
+    True
     """
     r = np.asarray(r, dtype=np.float64)
     return 1 / (1 + (r / c) ** 2)
@@ -251,6 +294,13 @@ def mad(residuals: ArrayLike, c: float = 1.4826) -> float:
     MAD = c * median(|r - median(r)|)

     This is a robust scale estimator with 50% breakdown point.
+
+    Examples
+    --------
+    >>> residuals = np.array([1.0, 1.1, 0.9, 1.0, 100.0])  # One outlier
+    >>> scale = mad(residuals)
+    >>> scale < 1.0  # Robust to the outlier
+    True
     """
     r = np.asarray(residuals, dtype=np.float64)
     return c * float(np.median(np.abs(r - np.median(r))))
@@ -279,6 +329,13 @@ def tau_scale(
     Notes
     -----
     Tau scale combines high breakdown point with efficiency.
+
+    Examples
+    --------
+    >>> residuals = np.array([1.0, 1.1, 0.9, 1.0, 1.2, 100.0])  # One outlier
+    >>> scale = tau_scale(residuals)
+    >>> scale < 10.0  # Robust to the outlier
+    True
     """
     r = np.asarray(residuals, dtype=np.float64)
     n = len(r)
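The weight and scale functions above are the building blocks of iteratively reweighted least squares. A hedged sketch of a robust location estimate, assuming the functions live in pytcl.static_estimation.robust as the file list at the top suggests:

import numpy as np
from pytcl.static_estimation.robust import huber_weight, mad

x = np.array([1.0, 1.1, 0.9, 1.0, 1.2, 100.0])  # one gross outlier
mu = np.median(x)                                # robust starting point
for _ in range(20):
    scale = mad(x - mu)                          # robust residual scale
    w = huber_weight((x - mu) / scale)           # downweight large residuals
    mu = np.sum(w * x) / np.sum(w)               # weighted-mean update
print(mu)  # stays near 1.0, while the contaminated plain mean is ~17.5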
pytcl/terrain/dem.py
CHANGED
@@ -438,6 +438,19 @@ def get_elevation_profile(
         Array of distances from start in meters.
     elevations : ndarray
         Array of elevation values in meters.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dem = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119),
+    ...     elevation=500)
+    >>> dists, elevs = get_elevation_profile(
+    ...     dem, np.radians(35.2), np.radians(-119.8),
+    ...     np.radians(35.8), np.radians(-119.2), n_points=10)
+    >>> len(dists) == 10
+    True
     """
     # Generate points along path
     lats = np.linspace(lat_start, lat_end, n_points)
@@ -492,6 +505,21 @@ def interpolate_dem(
     -------
     DEMGrid
         New interpolated DEM grid.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dem = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119),
+    ...     elevation=100)
+    >>> new_dem = interpolate_dem(
+    ...     dem,
+    ...     np.radians(35.2), np.radians(35.8),
+    ...     np.radians(-119.8), np.radians(-119.2),
+    ...     new_n_lat=5, new_n_lon=5)
+    >>> new_dem.data.shape
+    (5, 5)
     """
     # Create new coordinate arrays
     new_lats = np.linspace(new_lat_min, new_lat_max, new_n_lat)
@@ -547,6 +575,22 @@ def merge_dems(
     -------
     DEMGrid
         Merged DEM grid.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dem1 = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119), elevation=100)
+    >>> dem2 = create_flat_dem(
+    ...     np.radians(36), np.radians(37),
+    ...     np.radians(-120), np.radians(-119), elevation=200)
+    >>> merged = merge_dems(
+    ...     [dem1, dem2],
+    ...     np.radians(35), np.radians(37),
+    ...     np.radians(-120), np.radians(-119))
+    >>> merged.name
+    'Merged DEM'
     """
     # Compute output grid dimensions
     d_lat = np.radians(resolution_arcsec / 3600)
@@ -611,6 +655,19 @@ def create_flat_dem(
     -------
     DEMGrid
         Flat DEM grid.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dem = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119),
+    ...     elevation=500)
+    >>> dem.name
+    'Flat DEM'
+    >>> result = dem.get_elevation(np.radians(35.5), np.radians(-119.5))
+    >>> abs(result.elevation - 500) < 1
+    True
     """
     d_lat = np.radians(resolution_arcsec / 3600)
     d_lon = np.radians(resolution_arcsec / 3600)
@@ -672,6 +729,18 @@ def create_synthetic_terrain(
     -------
     DEMGrid
         Synthetic terrain DEM.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dem = create_synthetic_terrain(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119),
+    ...     base_elevation=500, amplitude=200, seed=42)
+    >>> dem.name
+    'Synthetic Terrain'
+    >>> dem.data.min() < dem.data.max()  # Has elevation variation
+    True
     """
     if seed is not None:
         np.random.seed(seed)
pytcl/terrain/visibility.py
CHANGED
@@ -160,6 +160,20 @@ def line_of_sight(
     The refraction coefficient models atmospheric bending of radio waves.
     A typical value for radio frequencies is 0.13 (4/3 Earth model).
     For optical line of sight, use 0.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.terrain.dem import create_flat_dem
+    >>> dem = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119), elevation=100)
+    >>> result = line_of_sight(
+    ...     dem,
+    ...     np.radians(35.3), np.radians(-119.7), 10,
+    ...     np.radians(35.7), np.radians(-119.3), 10)
+    >>> result.visible  # Clear LOS over flat terrain
+    True
     """
     # Effective Earth radius for refraction
     if refraction_coeff > 0:
@@ -296,6 +310,19 @@ def viewshed(
     -------
     ViewshedResult
         Viewshed computation result with visibility grid.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.terrain.dem import create_flat_dem
+    >>> dem = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119), elevation=100)
+    >>> result = viewshed(
+    ...     dem, np.radians(35.5), np.radians(-119.5), 20,
+    ...     max_range=10000, n_radials=36, samples_per_radial=10)
+    >>> result.visible.any()  # Some cells visible
+    True
     """
     # Convert max range to angular distance
     max_angular_range = max_range / earth_radius
@@ -428,6 +455,19 @@ def compute_horizon(
     -------
     list of HorizonPoint
         Horizon points for each azimuth direction.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.terrain.dem import create_flat_dem
+    >>> dem = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119), elevation=100)
+    >>> horizon = compute_horizon(
+    ...     dem, np.radians(35.5), np.radians(-119.5), 10,
+    ...     n_azimuths=8, max_range=10000, samples_per_radial=10)
+    >>> len(horizon)
+    8
     """
     max_angular_range = max_range / earth_radius
@@ -534,6 +574,18 @@ def terrain_masking_angle(
     -------
     float
         Masking angle in radians above horizontal.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.terrain.dem import create_flat_dem
+    >>> dem = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119), elevation=100)
+    >>> angle = terrain_masking_angle(
+    ...     dem, np.radians(35.5), np.radians(-119.5), 10, azimuth=0)
+    >>> -np.pi/2 <= angle <= np.pi/2  # Valid angle range
+    True
     """
     max_angular_range = max_range / earth_radius
@@ -628,6 +680,19 @@ def radar_coverage_map(
     -------
     ViewshedResult
         Radar coverage map.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.terrain.dem import create_flat_dem
+    >>> dem = create_flat_dem(
+    ...     np.radians(35), np.radians(36),
+    ...     np.radians(-120), np.radians(-119), elevation=100)
+    >>> coverage = radar_coverage_map(
+    ...     dem, np.radians(35.5), np.radians(-119.5), 30,
+    ...     max_range=20000, n_radials=36, samples_per_radial=20)
+    >>> coverage.visible.any()  # Some coverage exists
+    True
     """
     # Compute basic viewshed with refraction
     result = viewshed(
pytcl/trackers/hypothesis.py
CHANGED
@@ -208,6 +208,29 @@ def compute_association_likelihood(
     -------
     likelihood : float
         Joint likelihood of the association.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> # 2 tracks, 2 measurements
+    >>> likelihood_matrix = np.array([[0.9, 0.1],
+    ...                               [0.1, 0.8]])
+    >>> # Association: track 0 -> meas 0, track 1 -> meas 1
+    >>> association = {0: 0, 1: 1}
+    >>> lik = compute_association_likelihood(
+    ...     association, likelihood_matrix,
+    ...     detection_prob=0.9, clutter_density=1e-6, n_meas=2
+    ... )
+    >>> lik > 0
+    True
+    >>> # Association with missed detection
+    >>> assoc_miss = {0: 0, 1: -1}  # track 1 misses
+    >>> lik_miss = compute_association_likelihood(
+    ...     assoc_miss, likelihood_matrix,
+    ...     detection_prob=0.9, clutter_density=1e-6, n_meas=2
+    ... )
+    >>> lik > lik_miss  # Full detection more likely
+    True
     """
     likelihood = 1.0
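The doctest above treats compute_association_likelihood as a black box. A hedged sketch of the textbook MHT factorization it reflects (not necessarily the package's exact normalization): each detected track contributes P_D times its measurement likelihood, each missed track contributes (1 - P_D), and each unassigned measurement contributes the clutter density:

import numpy as np

likelihood_matrix = np.array([[0.9, 0.1],
                              [0.1, 0.8]])
association = {0: 0, 1: -1}        # track 0 -> meas 0, track 1 missed
p_d, clutter, n_meas = 0.9, 1e-6, 2

lik = 1.0
assigned = set()
for track, meas in association.items():
    if meas < 0:
        lik *= 1.0 - p_d                              # missed detection
    else:
        lik *= p_d * likelihood_matrix[track, meas]   # detection with this measurement
        assigned.add(meas)
lik *= clutter ** (n_meas - len(assigned))            # leftover measurements are clutter
print(lik)  # 8.1e-08 for this association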
@@ -259,6 +282,31 @@ def n_scan_prune(
     committed_track_ids : set
         Track IDs that are now committed (survived N-scan).

+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.trackers.hypothesis import (
+    ...     Hypothesis, MHTTrack, MHTTrackStatus, n_scan_prune
+    ... )
+    >>> # Two hypotheses, tracks with different creation scans
+    >>> track1 = MHTTrack(id=0, state=np.zeros(2), covariance=np.eye(2),
+    ...                   score=1.0, status=MHTTrackStatus.CONFIRMED,
+    ...                   history=[0], parent_id=-1, scan_created=0,
+    ...                   n_hits=3, n_misses=0)
+    >>> track2 = MHTTrack(id=1, state=np.zeros(2), covariance=np.eye(2),
+    ...                   score=0.5, status=MHTTrackStatus.TENTATIVE,
+    ...                   history=[1], parent_id=-1, scan_created=2,
+    ...                   n_hits=1, n_misses=0)
+    >>> tracks = {0: track1, 1: track2}
+    >>> hyp1 = Hypothesis(id=0, probability=0.8, track_ids=[0],
+    ...                   scan_created=0, parent_id=-1)
+    >>> hyp2 = Hypothesis(id=1, probability=0.2, track_ids=[1],
+    ...                   scan_created=2, parent_id=-1)
+    >>> pruned, committed = n_scan_prune([hyp1, hyp2], tracks, n_scan=2,
+    ...                                  current_scan=3)
+    >>> len(pruned) >= 1
+    True
+
     Notes
     -----
     N-scan pruning works by:
@@ -338,6 +386,23 @@ def prune_hypotheses_by_probability(
     -------
     pruned : list of Hypothesis
         Pruned and renormalized hypotheses.
+
+    Examples
+    --------
+    >>> from pytcl.trackers.hypothesis import Hypothesis, prune_hypotheses_by_probability
+    >>> # 5 hypotheses with varying probabilities
+    >>> hyps = [
+    ...     Hypothesis(id=0, probability=0.5, track_ids=[0], scan_created=0, parent_id=-1),
+    ...     Hypothesis(id=1, probability=0.3, track_ids=[1], scan_created=0, parent_id=-1),
+    ...     Hypothesis(id=2, probability=0.1, track_ids=[2], scan_created=0, parent_id=-1),
+    ...     Hypothesis(id=3, probability=0.05, track_ids=[3], scan_created=0, parent_id=-1),
+    ...     Hypothesis(id=4, probability=1e-8, track_ids=[4], scan_created=0, parent_id=-1),
+    ... ]
+    >>> pruned = prune_hypotheses_by_probability(hyps, max_hypotheses=3)
+    >>> len(pruned)  # Only top 3 kept
+    3
+    >>> sum(h.probability for h in pruned)  # Renormalized to 1
+    1.0
     """
     if not hypotheses:
         return []
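The prune-and-renormalize step in the doctest reduces to a few lines; a minimal standalone sketch, independent of the Hypothesis dataclass:

probs = [0.5, 0.3, 0.1, 0.05, 1e-8]
kept = sorted(probs, reverse=True)[:3]   # keep the 3 most probable hypotheses
total = sum(kept)
kept = [p / total for p in kept]         # renormalize so they sum to 1
print(kept)  # [0.555..., 0.333..., 0.111...]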