nrl-tracker 1.9.0__py3-none-any.whl → 1.9.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {nrl_tracker-1.9.0.dist-info → nrl_tracker-1.9.2.dist-info}/METADATA +4 -4
  2. {nrl_tracker-1.9.0.dist-info → nrl_tracker-1.9.2.dist-info}/RECORD +61 -60
  3. pytcl/__init__.py +2 -2
  4. pytcl/assignment_algorithms/gating.py +18 -0
  5. pytcl/assignment_algorithms/jpda.py +56 -0
  6. pytcl/assignment_algorithms/nd_assignment.py +65 -0
  7. pytcl/assignment_algorithms/network_flow.py +40 -0
  8. pytcl/astronomical/ephemerides.py +18 -0
  9. pytcl/astronomical/orbital_mechanics.py +131 -0
  10. pytcl/atmosphere/ionosphere.py +44 -0
  11. pytcl/atmosphere/models.py +29 -0
  12. pytcl/clustering/dbscan.py +9 -0
  13. pytcl/clustering/gaussian_mixture.py +20 -0
  14. pytcl/clustering/hierarchical.py +29 -0
  15. pytcl/clustering/kmeans.py +9 -0
  16. pytcl/coordinate_systems/conversions/geodetic.py +46 -0
  17. pytcl/coordinate_systems/conversions/spherical.py +35 -0
  18. pytcl/coordinate_systems/rotations/rotations.py +147 -0
  19. pytcl/core/__init__.py +16 -0
  20. pytcl/core/maturity.py +346 -0
  21. pytcl/core/optional_deps.py +1 -1
  22. pytcl/dynamic_estimation/gaussian_sum_filter.py +55 -0
  23. pytcl/dynamic_estimation/imm.py +29 -0
  24. pytcl/dynamic_estimation/information_filter.py +64 -0
  25. pytcl/dynamic_estimation/kalman/extended.py +56 -0
  26. pytcl/dynamic_estimation/kalman/linear.py +69 -0
  27. pytcl/dynamic_estimation/kalman/unscented.py +81 -0
  28. pytcl/dynamic_estimation/particle_filters/bootstrap.py +146 -0
  29. pytcl/dynamic_estimation/rbpf.py +51 -0
  30. pytcl/dynamic_estimation/smoothers.py +58 -0
  31. pytcl/dynamic_models/continuous_time/dynamics.py +104 -0
  32. pytcl/dynamic_models/discrete_time/coordinated_turn.py +6 -0
  33. pytcl/dynamic_models/discrete_time/singer.py +12 -0
  34. pytcl/dynamic_models/process_noise/coordinated_turn.py +46 -0
  35. pytcl/dynamic_models/process_noise/polynomial.py +6 -0
  36. pytcl/dynamic_models/process_noise/singer.py +52 -0
  37. pytcl/gravity/clenshaw.py +60 -0
  38. pytcl/gravity/egm.py +47 -0
  39. pytcl/gravity/models.py +34 -0
  40. pytcl/gravity/spherical_harmonics.py +73 -0
  41. pytcl/gravity/tides.py +34 -0
  42. pytcl/mathematical_functions/numerical_integration/quadrature.py +85 -0
  43. pytcl/mathematical_functions/special_functions/bessel.py +55 -0
  44. pytcl/mathematical_functions/special_functions/elliptic.py +42 -0
  45. pytcl/mathematical_functions/special_functions/error_functions.py +49 -0
  46. pytcl/mathematical_functions/special_functions/gamma_functions.py +43 -0
  47. pytcl/mathematical_functions/special_functions/lambert_w.py +5 -0
  48. pytcl/mathematical_functions/special_functions/marcum_q.py +16 -0
  49. pytcl/navigation/geodesy.py +101 -2
  50. pytcl/navigation/great_circle.py +71 -0
  51. pytcl/navigation/rhumb.py +74 -0
  52. pytcl/performance_evaluation/estimation_metrics.py +70 -0
  53. pytcl/performance_evaluation/track_metrics.py +30 -0
  54. pytcl/static_estimation/maximum_likelihood.py +54 -0
  55. pytcl/static_estimation/robust.py +57 -0
  56. pytcl/terrain/dem.py +69 -0
  57. pytcl/terrain/visibility.py +65 -0
  58. pytcl/trackers/hypothesis.py +65 -0
  59. {nrl_tracker-1.9.0.dist-info → nrl_tracker-1.9.2.dist-info}/LICENSE +0 -0
  60. {nrl_tracker-1.9.0.dist-info → nrl_tracker-1.9.2.dist-info}/WHEEL +0 -0
  61. {nrl_tracker-1.9.0.dist-info → nrl_tracker-1.9.2.dist-info}/top_level.txt +0 -0
pytcl/navigation/rhumb.py CHANGED
@@ -263,6 +263,15 @@ def indirect_rhumb_spherical(
263
263
  -------
264
264
  RhumbResult
265
265
  Distance and constant bearing.
266
+
267
+ Examples
268
+ --------
269
+ >>> import numpy as np
270
+ >>> lat1, lon1 = np.radians(40), np.radians(-74) # New York
271
+ >>> lat2, lon2 = np.radians(51), np.radians(0) # London
272
+ >>> result = indirect_rhumb_spherical(lat1, lon1, lat2, lon2)
273
+ >>> result.distance > 5000000 # Over 5000 km
274
+ True
266
275
  """
267
276
  distance = rhumb_distance_spherical(lat1, lon1, lat2, lon2, radius)
268
277
  bearing = rhumb_bearing(lat1, lon1, lat2, lon2)
@@ -354,6 +363,15 @@ def rhumb_distance_ellipsoidal(
354
363
  -------
355
364
  float
356
365
  Rhumb line distance in meters.
366
+
367
+ Examples
368
+ --------
369
+ >>> import numpy as np
370
+ >>> lat1, lon1 = np.radians(40), np.radians(-74)
371
+ >>> lat2, lon2 = np.radians(51), np.radians(0)
372
+ >>> dist = rhumb_distance_ellipsoidal(lat1, lon1, lat2, lon2)
373
+ >>> dist > 5000000 # Over 5000 km
374
+ True
357
375
  """
358
376
  a = ellipsoid.a
359
377
  e2 = ellipsoid.e2
@@ -419,6 +437,15 @@ def indirect_rhumb(
419
437
  -------
420
438
  RhumbResult
421
439
  Distance and constant bearing.
440
+
441
+ Examples
442
+ --------
443
+ >>> import numpy as np
444
+ >>> lat1, lon1 = np.radians(40), np.radians(-74) # New York
445
+ >>> lat2, lon2 = np.radians(51), np.radians(0) # London
446
+ >>> result = indirect_rhumb(lat1, lon1, lat2, lon2)
447
+ >>> 0 < result.bearing < np.pi # Eastward bearing
448
+ True
422
449
  """
423
450
  distance = rhumb_distance_ellipsoidal(lat1, lon1, lat2, lon2, ellipsoid)
424
451
 
@@ -460,6 +487,15 @@ def direct_rhumb(
460
487
  -------
461
488
  RhumbDirectResult
462
489
  Destination latitude and longitude.
490
+
491
+ Examples
492
+ --------
493
+ >>> import numpy as np
494
+ >>> lat, lon = np.radians(40), np.radians(-74)
495
+ >>> bearing = np.radians(90) # Due east
496
+ >>> dest = direct_rhumb(lat, lon, bearing, 100000) # 100 km
497
+ >>> np.degrees(dest.lon) > -74 # Moved east
498
+ True
463
499
  """
464
500
  a = ellipsoid.a
465
501
  e2 = ellipsoid.e2
@@ -534,6 +570,17 @@ def rhumb_intersect(
534
570
  RhumbIntersectionResult
535
571
  Intersection point and validity flag.
536
572
 
573
+ Examples
574
+ --------
575
+ >>> import numpy as np
576
+ >>> lat1, lon1 = np.radians(40), np.radians(-74)
577
+ >>> lat2, lon2 = np.radians(51), np.radians(0)
578
+ >>> bearing1 = np.radians(45)
579
+ >>> bearing2 = np.radians(270)
580
+ >>> result = rhumb_intersect(lat1, lon1, bearing1, lat2, lon2, bearing2)
581
+ >>> result.valid # These two rhumb lines do intersect
582
+ True
583
+
537
584
  Notes
538
585
  -----
539
586
  Unlike great circles, two rhumb lines may not intersect (if bearings
@@ -612,6 +659,15 @@ def rhumb_midpoint(
612
659
  -------
613
660
  RhumbDirectResult
614
661
  Midpoint latitude and longitude.
662
+
663
+ Examples
664
+ --------
665
+ >>> import numpy as np
666
+ >>> lat1, lon1 = np.radians(0), np.radians(0)
667
+ >>> lat2, lon2 = np.radians(10), np.radians(10)
668
+ >>> mid = rhumb_midpoint(lat1, lon1, lat2, lon2)
669
+ >>> np.isclose(np.degrees(mid.lat), 5, atol=0.1)
670
+ True
615
671
  """
616
672
  result = indirect_rhumb_spherical(lat1, lon1, lat2, lon2)
617
673
  return direct_rhumb_spherical(lat1, lon1, result.bearing, result.distance / 2)
@@ -643,6 +699,15 @@ def rhumb_waypoints(
643
699
  -------
644
700
  lats, lons : ndarray
645
701
  Arrays of waypoint latitudes and longitudes in radians.
702
+
703
+ Examples
704
+ --------
705
+ >>> import numpy as np
706
+ >>> lat1, lon1 = np.radians(40), np.radians(-74)
707
+ >>> lat2, lon2 = np.radians(51), np.radians(0)
708
+ >>> lats, lons = rhumb_waypoints(lat1, lon1, lat2, lon2, 5)
709
+ >>> len(lats)
710
+ 5
646
711
  """
647
712
  result = indirect_rhumb_spherical(lat1, lon1, lat2, lon2, radius)
648
713
 
@@ -685,6 +750,15 @@ def compare_great_circle_rhumb(
685
750
  Rhumb line distance in meters.
686
751
  difference_percent : float
687
752
  Percentage difference (rhumb is longer).
753
+
754
+ Examples
755
+ --------
756
+ >>> import numpy as np
757
+ >>> lat1, lon1 = np.radians(40), np.radians(-74) # NYC
758
+ >>> lat2, lon2 = np.radians(51), np.radians(0) # London
759
+ >>> gc, rhumb, diff = compare_great_circle_rhumb(lat1, lon1, lat2, lon2)
760
+ >>> rhumb > gc # Rhumb is always longer
761
+ True
688
762
  """
689
763
  from pytcl.navigation.great_circle import great_circle_distance
690
764
 
@@ -140,6 +140,14 @@ def velocity_rmse(
140
140
  -------
141
141
  float
142
142
  Velocity RMSE.
143
+
144
+ Examples
145
+ --------
146
+ >>> # State = [x, vx, y, vy], velocities are indices [1, 3]
147
+ >>> true = np.array([[0, 10, 0, 5], [1, 10, 0.5, 5]])
148
+ >>> est = np.array([[0, 9.5, 0, 5.2], [1, 10.2, 0.5, 4.9]])
149
+ >>> velocity_rmse(true, est, [1, 3]) # doctest: +SKIP
150
+ 0.316...
143
151
  """
144
152
  true_vel = true_states[:, velocity_indices]
145
153
  est_vel = estimated_states[:, velocity_indices]
@@ -216,6 +224,15 @@ def nees_sequence(
216
224
  -------
217
225
  ndarray
218
226
  NEES values for each time step, shape (N,).
227
+
228
+ Examples
229
+ --------
230
+ >>> true = np.array([[1.0, 2.0], [1.5, 2.5]])
231
+ >>> est = np.array([[1.1, 1.9], [1.6, 2.4]])
232
+ >>> P = np.array([np.eye(2) * 0.1, np.eye(2) * 0.1])
233
+ >>> nees_vals = nees_sequence(true, est, P)
234
+ >>> len(nees_vals)
235
+ 2
219
236
  """
220
237
  N = true_states.shape[0]
221
238
  nees_values = np.zeros(N)
@@ -247,6 +264,15 @@ def average_nees(
247
264
  -------
248
265
  float
249
266
  Average NEES (should be close to state_dim for consistent filter).
267
+
268
+ Examples
269
+ --------
270
+ >>> true = np.array([[1.0, 2.0], [1.5, 2.5], [2.0, 3.0]])
271
+ >>> est = np.array([[1.1, 1.9], [1.6, 2.4], [2.1, 2.9]])
272
+ >>> P = np.array([np.eye(2) * 0.1] * 3)
273
+ >>> avg = average_nees(true, est, P)
274
+ >>> avg # Should be close to state_dim=2 for consistent filter
275
+ 0.2
250
276
  """
251
277
  return float(np.mean(nees_sequence(true_states, estimated_states, covariances)))
252
278
 
@@ -279,6 +305,13 @@ def nis(
279
305
 
280
306
  where nu = z - H*x_pred is the innovation and S is the innovation
281
307
  covariance.
308
+
309
+ Examples
310
+ --------
311
+ >>> nu = np.array([0.5, -0.3]) # Innovation vector
312
+ >>> S = np.eye(2) * 0.25 # Innovation covariance
313
+ >>> nis(nu, S)
314
+ 1.36
282
315
  """
283
316
  innovation = np.asarray(innovation)
284
317
  innovation_covariance = np.asarray(innovation_covariance)
@@ -305,6 +338,14 @@ def nis_sequence(
305
338
  -------
306
339
  ndarray
307
340
  NIS values for each time step.
341
+
342
+ Examples
343
+ --------
344
+ >>> innovations = np.array([[0.5, -0.3], [0.2, 0.1]])
345
+ >>> S = np.array([np.eye(2) * 0.25, np.eye(2) * 0.25])
346
+ >>> nis_vals = nis_sequence(innovations, S)
347
+ >>> len(nis_vals)
348
+ 2
308
349
  """
309
350
  N = innovations.shape[0]
310
351
  nis_values = np.zeros(N)
@@ -395,6 +436,15 @@ def credibility_interval(
395
436
  -------
396
437
  float
397
438
  Fraction of errors within the interval.
439
+
440
+ Examples
441
+ --------
442
+ >>> rng = np.random.default_rng(42)
443
+ >>> errors = rng.normal(0, 0.1, (100, 2)) # Small errors
444
+ >>> P = np.array([np.eye(2) * 0.1] * 100) # Matching covariance
445
+ >>> frac = credibility_interval(errors, P, interval=0.95)
446
+ >>> frac > 0.9 # Most errors within interval
447
+ True
398
448
  """
399
449
  N = len(errors)
400
450
  state_dim = errors.shape[1]
@@ -430,6 +480,16 @@ def monte_carlo_rmse(
430
480
  -------
431
481
  ndarray
432
482
  RMSE values.
483
+
484
+ Examples
485
+ --------
486
+ >>> # 3 Monte Carlo runs, 2 time steps, 2 state components
487
+ >>> errors = np.array([[[0.1, 0.2], [0.15, 0.1]],
488
+ ... [[0.05, 0.1], [0.2, 0.15]],
489
+ ... [[0.15, 0.05], [0.1, 0.2]]])
490
+ >>> rmse_per_time = monte_carlo_rmse(errors, axis=0)
491
+ >>> rmse_per_time.shape
492
+ (2, 2)
433
493
  """
434
494
  return np.sqrt(np.mean(errors**2, axis=axis))
435
495
 
@@ -453,6 +513,16 @@ def estimation_error_bounds(
453
513
  ndarray
454
514
  Error bounds (standard deviations) for each component,
455
515
  shape (N, state_dim).
516
+
517
+ Examples
518
+ --------
519
+ >>> P = np.array([[[1.0, 0], [0, 4.0]],
520
+ ... [[0.25, 0], [0, 1.0]]])
521
+ >>> bounds = estimation_error_bounds(P, sigma=2.0)
522
+ >>> bounds[0] # 2-sigma bounds: 2*sqrt(1), 2*sqrt(4)
523
+ array([2., 4.])
524
+ >>> bounds[1] # 2-sigma bounds: 2*sqrt(0.25), 2*sqrt(1)
525
+ array([1., 2.])
456
526
  """
457
527
  # Extract diagonal elements (variances)
458
528
  variances = np.diagonal(covariances, axis1=1, axis2=2)
@@ -192,6 +192,17 @@ def ospa_over_time(
192
192
  ------
193
193
  ValueError
194
194
  If sequences have different lengths.
195
+
196
+ Examples
197
+ --------
198
+ >>> # Two time steps with ground truth and estimates
199
+ >>> X_seq = [[np.array([0, 0]), np.array([10, 10])],
200
+ ... [np.array([1, 0]), np.array([11, 10])]]
201
+ >>> Y_seq = [[np.array([0.5, 0]), np.array([10, 10.5])],
202
+ ... [np.array([1.5, 0]), np.array([11, 10.5])]]
203
+ >>> ospa_vals = ospa_over_time(X_seq, Y_seq, c=100, p=2)
204
+ >>> len(ospa_vals)
205
+ 2
195
206
  """
196
207
  if len(X_sequence) != len(Y_sequence):
197
208
  raise ValueError("Sequences must have the same length")
@@ -340,6 +351,13 @@ def identity_switches(
340
351
  -------
341
352
  int
342
353
  Number of identity switches.
354
+
355
+ Examples
356
+ --------
357
+ >>> true_labels = np.array([0, 0, 1, 1])
358
+ >>> estimated_labels = np.array([0, 0, 0, 0]) # Track 0 switches targets
359
+ >>> identity_switches(true_labels, estimated_labels)
360
+ 1
343
361
  """
344
362
  true_labels = np.asarray(true_labels)
345
363
  estimated_labels = np.asarray(estimated_labels)
@@ -391,6 +409,18 @@ def mot_metrics(
391
409
  MOTA (Multiple Object Tracking Accuracy) accounts for false positives,
392
410
  misses, and identity switches. MOTP (Precision) measures localization
393
411
  accuracy for correctly matched pairs.
412
+
413
+ Examples
414
+ --------
415
+ >>> gt = [[np.array([0, 0]), np.array([10, 10])],
416
+ ... [np.array([1, 0]), np.array([11, 10])]]
417
+ >>> est = [[np.array([0.5, 0]), np.array([10.5, 10])],
418
+ ... [np.array([1.5, 0]), np.array([11.5, 10])]]
419
+ >>> result = mot_metrics(gt, est, threshold=5.0)
420
+ >>> result.mota # High accuracy with small errors
421
+ 1.0
422
+ >>> result.motp < 1.0 # Some localization error
423
+ True
394
424
  """
395
425
  total_gt = 0
396
426
  total_fp = 0
@@ -222,6 +222,14 @@ def fisher_information_exponential_family(
222
222
  -----
223
223
  For exponential families, I(theta) = Var[T(X)] = d^2 A(theta) / d theta^2,
224
224
  where A(theta) is the log-partition function.
225
+
226
+ Examples
227
+ --------
228
+ >>> def suff_stats(x, theta):
229
+ ... return np.array([x, x**2]) # Mean and second moment
230
+ >>> data = np.random.normal(0, 1, 100)
231
+ >>> theta = np.array([0.0, 1.0])
232
+ >>> F = fisher_information_exponential_family(suff_stats, theta, data)
225
233
  """
226
234
  theta = np.asarray(theta, dtype=np.float64)
227
235
  data = np.asarray(data, dtype=np.float64)
@@ -263,6 +271,14 @@ def observed_fisher_information(
263
271
  The observed Fisher information is often more accurate for
264
272
  finite samples and is asymptotically equivalent to the
265
273
  expected Fisher information.
274
+
275
+ Examples
276
+ --------
277
+ >>> data = np.array([1.2, 0.8, 1.1, 0.9, 1.0])
278
+ >>> def log_lik(theta):
279
+ ... return -0.5 * np.sum((data - theta[0])**2 / theta[1]) - 2.5 * np.log(theta[1])
280
+ >>> theta = np.array([1.0, 0.1])
281
+ >>> F_obs = observed_fisher_information(log_lik, theta)
266
282
  """
267
283
  return fisher_information_numerical(log_likelihood, theta, h)
268
284
 
@@ -361,6 +377,14 @@ def cramer_rao_bound_biased(
361
377
  -----
362
378
  For a biased estimator with bias b(theta), the CRB becomes:
363
379
  Var(theta_hat) >= (I + db/dtheta) I^{-1} (I + db/dtheta)^T
380
+
381
+ Examples
382
+ --------
383
+ >>> F = np.array([[10.0, 0], [0, 5.0]]) # Fisher info
384
+ >>> db = np.array([[0.1, 0], [0, 0.2]]) # Bias gradient
385
+ >>> crb_biased = cramer_rao_bound_biased(F, db)
386
+ >>> crb_biased.shape
387
+ (2, 2)
364
388
  """
365
389
  F = np.asarray(fisher_info, dtype=np.float64)
366
390
  db = np.asarray(bias_gradient, dtype=np.float64)
@@ -571,6 +595,17 @@ def mle_scoring(
571
595
  -----
572
596
  Fisher scoring update: theta_{n+1} = theta_n + I(theta_n)^{-1} @ score
573
597
  This is equivalent to Newton-Raphson when I(theta) = -E[H].
598
+
599
+ Examples
600
+ --------
601
+ >>> data = np.array([1.0, 1.1, 0.9, 1.2, 0.8])
602
+ >>> def log_lik(theta):
603
+ ... return -0.5 * len(data) * np.log(2*np.pi) - np.sum((data - theta[0])**2) / 2
604
+ >>> def score(theta):
605
+ ... return np.array([np.sum(data - theta[0])])
606
+ >>> def fisher(theta):
607
+ ... return np.array([[len(data)]])
608
+ >>> result = mle_scoring(log_lik, score, fisher, np.array([0.0]))
574
609
  """
575
610
  theta = np.asarray(theta_init, dtype=np.float64).copy()
576
611
 
@@ -729,6 +764,13 @@ def aic(log_likelihood: float, n_params: int) -> float:
729
764
  Notes
730
765
  -----
731
766
  AIC = -2 * log_likelihood + 2 * n_params
767
+
768
+ Examples
769
+ --------
770
+ >>> log_lik = -100.0
771
+ >>> n_params = 3
772
+ >>> aic(log_lik, n_params)
773
+ 206.0
732
774
  """
733
775
  return -2 * log_likelihood + 2 * n_params
734
776
 
@@ -754,6 +796,12 @@ def bic(log_likelihood: float, n_params: int, n_samples: int) -> float:
754
796
  Notes
755
797
  -----
756
798
  BIC = -2 * log_likelihood + n_params * log(n_samples)
799
+
800
+ Examples
801
+ --------
802
+ >>> log_lik = -100.0
803
+ >>> bic(log_lik, n_params=3, n_samples=100)
804
+ 213.81551055796427
757
805
  """
758
806
  return -2 * log_likelihood + n_params * np.log(n_samples)
759
807
 
@@ -780,6 +828,12 @@ def aicc(log_likelihood: float, n_params: int, n_samples: int) -> float:
780
828
  -----
781
829
  AICc adds a correction for small sample sizes:
782
830
  AICc = AIC + 2*k*(k+1)/(n-k-1)
831
+
832
+ Examples
833
+ --------
834
+ >>> log_lik = -50.0
835
+ >>> aicc(log_lik, n_params=3, n_samples=20)
836
+ 107.5
783
837
  """
784
838
  k = n_params
785
839
  n = n_samples
@@ -99,6 +99,15 @@ def huber_weight(r: ArrayLike, c: float = 1.345) -> NDArray[np.floating]:
99
99
  The Huber weight function is:
100
100
  w(r) = 1 if |r| <= c
101
101
  w(r) = c / |r| if |r| > c
102
+
103
+ Examples
104
+ --------
105
+ >>> r = np.array([0.5, 1.0, 2.0, 5.0]) # Standardized residuals
106
+ >>> w = huber_weight(r, c=1.345)
107
+ >>> w[0] # Small residual gets weight 1
108
+ 1.0
109
+ >>> w[3] < 0.5 # Large residual gets reduced weight
110
+ True
102
111
  """
103
112
  r = np.asarray(r, dtype=np.float64)
104
113
  abs_r = np.abs(r)
@@ -129,6 +138,13 @@ def huber_rho(r: ArrayLike, c: float = 1.345) -> NDArray[np.floating]:
129
138
  The Huber rho function is:
130
139
  rho(r) = r^2 / 2 if |r| <= c
131
140
  rho(r) = c * |r| - c^2/2 if |r| > c
141
+
142
+ Examples
143
+ --------
144
+ >>> r = np.array([0.5, 1.0, 2.0])
145
+ >>> rho = huber_rho(r, c=1.345)
146
+ >>> rho[0] # Small residual: r^2/2
147
+ 0.125
132
148
  """
133
149
  r = np.asarray(r, dtype=np.float64)
134
150
  abs_r = np.abs(r)
@@ -160,6 +176,15 @@ def tukey_weight(r: ArrayLike, c: float = 4.685) -> NDArray[np.floating]:
160
176
  w(r) = 0 if |r| > c
161
177
 
162
178
  This provides complete rejection of large outliers.
179
+
180
+ Examples
181
+ --------
182
+ >>> r = np.array([0.5, 2.0, 5.0, 10.0])
183
+ >>> w = tukey_weight(r, c=4.685)
184
+ >>> w[0] > 0.9 # Small residual gets high weight
185
+ True
186
+ >>> w[3] # Large residual completely rejected
187
+ 0.0
163
188
  """
164
189
  r = np.asarray(r, dtype=np.float64)
165
190
  abs_r = np.abs(r)
@@ -190,6 +215,15 @@ def tukey_rho(r: ArrayLike, c: float = 4.685) -> NDArray[np.floating]:
190
215
  The Tukey rho function is:
191
216
  rho(r) = c^2/6 * (1 - (1 - (r/c)^2)^3) if |r| <= c
192
217
  rho(r) = c^2/6 if |r| > c
218
+
219
+ Examples
220
+ --------
221
+ >>> r = np.array([0.0, 2.0, 10.0])
222
+ >>> rho = tukey_rho(r, c=4.685)
223
+ >>> rho[0] # Zero residual
224
+ 0.0
225
+ >>> np.isclose(rho[2], 4.685**2 / 6) # Large residuals saturate at c^2/6
226
+ True
193
227
  """
194
228
  r = np.asarray(r, dtype=np.float64)
195
229
  abs_r = np.abs(r)
@@ -219,6 +253,15 @@ def cauchy_weight(r: ArrayLike, c: float = 2.385) -> NDArray[np.floating]:
219
253
  -----
220
254
  The Cauchy weight function is:
221
255
  w(r) = 1 / (1 + (r/c)^2)
256
+
257
+ Examples
258
+ --------
259
+ >>> r = np.array([0.0, 1.0, 5.0])
260
+ >>> w = cauchy_weight(r, c=2.385)
261
+ >>> w[0] # Zero residual gets weight 1
262
+ 1.0
263
+ >>> 0 < w[2] < 1 # Large residuals get reduced weight (but never zero)
264
+ True
222
265
  """
223
266
  r = np.asarray(r, dtype=np.float64)
224
267
  return 1 / (1 + (r / c) ** 2)
@@ -251,6 +294,13 @@ def mad(residuals: ArrayLike, c: float = 1.4826) -> float:
251
294
  MAD = c * median(|r - median(r)|)
252
295
 
253
296
  This is a robust scale estimator with 50% breakdown point.
297
+
298
+ Examples
299
+ --------
300
+ >>> residuals = np.array([1.0, 1.1, 0.9, 1.0, 100.0]) # One outlier
301
+ >>> scale = mad(residuals)
302
+ >>> scale < 1.0 # Robust to the outlier
303
+ True
254
304
  """
255
305
  r = np.asarray(residuals, dtype=np.float64)
256
306
  return c * float(np.median(np.abs(r - np.median(r))))
@@ -279,6 +329,13 @@ def tau_scale(
279
329
  Notes
280
330
  -----
281
331
  Tau scale combines high breakdown point with efficiency.
332
+
333
+ Examples
334
+ --------
335
+ >>> residuals = np.array([1.0, 1.1, 0.9, 1.0, 1.2, 100.0]) # One outlier
336
+ >>> scale = tau_scale(residuals)
337
+ >>> scale < 10.0 # Robust to the outlier
338
+ True
282
339
  """
283
340
  r = np.asarray(residuals, dtype=np.float64)
284
341
  n = len(r)
pytcl/terrain/dem.py CHANGED
@@ -438,6 +438,19 @@ def get_elevation_profile(
438
438
  Array of distances from start in meters.
439
439
  elevations : ndarray
440
440
  Array of elevation values in meters.
441
+
442
+ Examples
443
+ --------
444
+ >>> import numpy as np
445
+ >>> dem = create_flat_dem(
446
+ ... np.radians(35), np.radians(36),
447
+ ... np.radians(-120), np.radians(-119),
448
+ ... elevation=500)
449
+ >>> dists, elevs = get_elevation_profile(
450
+ ... dem, np.radians(35.2), np.radians(-119.8),
451
+ ... np.radians(35.8), np.radians(-119.2), n_points=10)
452
+ >>> len(dists) == 10
453
+ True
441
454
  """
442
455
  # Generate points along path
443
456
  lats = np.linspace(lat_start, lat_end, n_points)
@@ -492,6 +505,21 @@ def interpolate_dem(
492
505
  -------
493
506
  DEMGrid
494
507
  New interpolated DEM grid.
508
+
509
+ Examples
510
+ --------
511
+ >>> import numpy as np
512
+ >>> dem = create_flat_dem(
513
+ ... np.radians(35), np.radians(36),
514
+ ... np.radians(-120), np.radians(-119),
515
+ ... elevation=100)
516
+ >>> new_dem = interpolate_dem(
517
+ ... dem,
518
+ ... np.radians(35.2), np.radians(35.8),
519
+ ... np.radians(-119.8), np.radians(-119.2),
520
+ ... new_n_lat=5, new_n_lon=5)
521
+ >>> new_dem.data.shape
522
+ (5, 5)
495
523
  """
496
524
  # Create new coordinate arrays
497
525
  new_lats = np.linspace(new_lat_min, new_lat_max, new_n_lat)
@@ -547,6 +575,22 @@ def merge_dems(
547
575
  -------
548
576
  DEMGrid
549
577
  Merged DEM grid.
578
+
579
+ Examples
580
+ --------
581
+ >>> import numpy as np
582
+ >>> dem1 = create_flat_dem(
583
+ ... np.radians(35), np.radians(36),
584
+ ... np.radians(-120), np.radians(-119), elevation=100)
585
+ >>> dem2 = create_flat_dem(
586
+ ... np.radians(36), np.radians(37),
587
+ ... np.radians(-120), np.radians(-119), elevation=200)
588
+ >>> merged = merge_dems(
589
+ ... [dem1, dem2],
590
+ ... np.radians(35), np.radians(37),
591
+ ... np.radians(-120), np.radians(-119))
592
+ >>> merged.name
593
+ 'Merged DEM'
550
594
  """
551
595
  # Compute output grid dimensions
552
596
  d_lat = np.radians(resolution_arcsec / 3600)
@@ -611,6 +655,19 @@ def create_flat_dem(
611
655
  -------
612
656
  DEMGrid
613
657
  Flat DEM grid.
658
+
659
+ Examples
660
+ --------
661
+ >>> import numpy as np
662
+ >>> dem = create_flat_dem(
663
+ ... np.radians(35), np.radians(36),
664
+ ... np.radians(-120), np.radians(-119),
665
+ ... elevation=500)
666
+ >>> dem.name
667
+ 'Flat DEM'
668
+ >>> result = dem.get_elevation(np.radians(35.5), np.radians(-119.5))
669
+ >>> abs(result.elevation - 500) < 1
670
+ True
614
671
  """
615
672
  d_lat = np.radians(resolution_arcsec / 3600)
616
673
  d_lon = np.radians(resolution_arcsec / 3600)
@@ -672,6 +729,18 @@ def create_synthetic_terrain(
672
729
  -------
673
730
  DEMGrid
674
731
  Synthetic terrain DEM.
732
+
733
+ Examples
734
+ --------
735
+ >>> import numpy as np
736
+ >>> dem = create_synthetic_terrain(
737
+ ... np.radians(35), np.radians(36),
738
+ ... np.radians(-120), np.radians(-119),
739
+ ... base_elevation=500, amplitude=200, seed=42)
740
+ >>> dem.name
741
+ 'Synthetic Terrain'
742
+ >>> dem.data.min() < dem.data.max() # Has elevation variation
743
+ True
675
744
  """
676
745
  if seed is not None:
677
746
  np.random.seed(seed)