nrl-tracker 1.11.0-py3-none-any.whl → 1.12.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {nrl_tracker-1.11.0.dist-info → nrl_tracker-1.12.0.dist-info}/METADATA +4 -4
  2. {nrl_tracker-1.11.0.dist-info → nrl_tracker-1.12.0.dist-info}/RECORD +30 -30
  3. pytcl/__init__.py +2 -2
  4. pytcl/assignment_algorithms/network_flow.py +172 -60
  5. pytcl/astronomical/time_systems.py +21 -0
  6. pytcl/containers/cluster_set.py +36 -0
  7. pytcl/coordinate_systems/conversions/geodetic.py +58 -0
  8. pytcl/core/array_utils.py +52 -0
  9. pytcl/gpu/ekf.py +46 -0
  10. pytcl/gpu/kalman.py +16 -0
  11. pytcl/gpu/matrix_utils.py +44 -1
  12. pytcl/gpu/particle_filter.py +33 -0
  13. pytcl/gpu/ukf.py +31 -0
  14. pytcl/gpu/utils.py +15 -0
  15. pytcl/magnetism/igrf.py +72 -0
  16. pytcl/magnetism/wmm.py +52 -0
  17. pytcl/mathematical_functions/basic_matrix/decompositions.py +7 -0
  18. pytcl/mathematical_functions/basic_matrix/special_matrices.py +31 -0
  19. pytcl/mathematical_functions/geometry/geometry.py +33 -0
  20. pytcl/mathematical_functions/interpolation/interpolation.py +83 -0
  21. pytcl/mathematical_functions/signal_processing/detection.py +31 -0
  22. pytcl/mathematical_functions/signal_processing/filters.py +56 -0
  23. pytcl/mathematical_functions/signal_processing/matched_filter.py +32 -1
  24. pytcl/mathematical_functions/special_functions/hypergeometric.py +17 -0
  25. pytcl/mathematical_functions/statistics/estimators.py +71 -0
  26. pytcl/mathematical_functions/transforms/wavelets.py +25 -0
  27. pytcl/navigation/great_circle.py +33 -0
  28. {nrl_tracker-1.11.0.dist-info → nrl_tracker-1.12.0.dist-info}/LICENSE +0 -0
  29. {nrl_tracker-1.11.0.dist-info → nrl_tracker-1.12.0.dist-info}/WHEEL +0 -0
  30. {nrl_tracker-1.11.0.dist-info → nrl_tracker-1.12.0.dist-info}/top_level.txt +0 -0
@@ -539,6 +539,22 @@ def duplication_matrix(n: int) -> NDArray[np.floating]:
     -------
     D : ndarray
         Duplication matrix of shape (n*n, n*(n+1)/2).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions import duplication_matrix, vech, vec
+    >>> # Create duplication matrix for 2x2 symmetric matrices
+    >>> D = duplication_matrix(2)
+    >>> D.shape
+    (4, 3)
+    >>> # For symmetric matrix A = [[1, 2], [2, 3]], half-vec has 3 elements
+    >>> A = np.array([[1.0, 2.0], [2.0, 3.0]])
+    >>> vech_A = vech(A)
+    >>> # Duplication matrix should reconstruct full vectorization
+    >>> vec_A = D @ vech_A
+    >>> np.allclose(vec_A, vec(A))
+    True
     """
     m = n * (n + 1) // 2
     D = np.zeros((n * n, m), dtype=np.float64)
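The identity these doctests exercise, vec(A) = D · vech(A), is easy to reproduce in standalone NumPy. The helpers below are illustrative stand-ins assuming column-major vec and a lower-triangle vech (the convention the expected outputs above imply), not pytcl's implementations:

# Illustrative sketch of vec(A) = D @ vech(A); standalone, not pytcl's code.
import numpy as np

def vec(a):
    # Stack columns: vec([[1, 2], [2, 3]]) -> [1, 2, 2, 3]
    return a.flatten(order="F")

def vech(a):
    # Stack the lower triangle column by column: [a00, a10, a11]
    return np.concatenate([a[j:, j] for j in range(a.shape[0])])

def duplication_matrix(n):
    # D maps vech(A) of a symmetric A onto column-major vec(A)
    m = n * (n + 1) // 2
    d = np.zeros((n * n, m))
    col = 0
    for j in range(n):
        for i in range(j, n):
            d[j * n + i, col] = 1.0  # slot of A[i, j] in vec
            d[i * n + j, col] = 1.0  # mirror A[j, i]; same slot when i == j
            col += 1
    return d

A = np.array([[1.0, 2.0], [2.0, 3.0]])
D = duplication_matrix(2)                      # shape (4, 3), as in the doctest
assert np.allclose(D @ vech(A), vec(A))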
@@ -574,6 +590,21 @@ def elimination_matrix(n: int) -> NDArray[np.floating]:
     -------
     L : ndarray
         Elimination matrix of shape (n*(n+1)/2, n*n).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions import elimination_matrix, vec
+    >>> # Create elimination matrix for 2x2 matrices
+    >>> L = elimination_matrix(2)
+    >>> L.shape
+    (3, 4)
+    >>> # For matrix A, extracts unique elements: [A[0,0], A[1,0], A[1,1]]
+    >>> A = np.array([[1.0, 2.0], [3.0, 4.0]])
+    >>> # Elimination extracts lower-triangular elements
+    >>> vech_A = L @ vec(A)
+    >>> np.allclose(vech_A, [1.0, 3.0, 4.0])
+    True
     """
     m = n * (n + 1) // 2
     L = np.zeros((m, n * n), dtype=np.float64)
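The elimination matrix is the companion operation: L picks the lower-triangle entries out of vec(A), so L · vec(A) = vech(A), and for symmetric A it is a one-sided inverse of the duplication matrix (L · D = I). A standalone sketch under the same conventions as above:

# Illustrative sketch of L @ vec(A) = vech(A); standalone, not pytcl's code.
import numpy as np

def elimination_matrix(n):
    m = n * (n + 1) // 2
    ell = np.zeros((m, n * n))
    row = 0
    for j in range(n):
        for i in range(j, n):
            ell[row, j * n + i] = 1.0  # select A[i, j] from column-major vec
            row += 1
    return ell

A = np.array([[1.0, 2.0], [3.0, 4.0]])
L = elimination_matrix(2)
vec_A = A.flatten(order="F")                    # [1, 3, 2, 4]
assert np.allclose(L @ vec_A, [1.0, 3.0, 4.0])  # matches the doctest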
@@ -132,6 +132,13 @@ def convex_hull_area(points: ArrayLike) -> float:
     -------
     area : float
         Area (2D) or volume (3D) of the convex hull.
+
+    Examples
+    --------
+    >>> points = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
+    >>> area = convex_hull_area(points)
+    >>> area
+    1.0
     """
     points = np.asarray(points, dtype=np.float64)
     hull = ConvexHull(points)
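The unit-square value in this doctest can be cross-checked with the shoelace formula, which computes the area of a simple polygon directly from its ordered vertices:

# Independent cross-check of the doctest via the shoelace formula.
import numpy as np

def shoelace_area(pts):
    x, y = pts[:, 0], pts[:, 1]
    # Area = |sum(x_i * y_{i+1} - x_{i+1} * y_i)| / 2, indices cyclic
    return 0.5 * abs(np.sum(x * np.roll(y, -1) - np.roll(x, -1) * y))

points = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
assert shoelace_area(points) == 1.0  # agrees with convex_hull_area above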
@@ -180,6 +187,13 @@ def polygon_centroid(vertices: ArrayLike) -> NDArray[np.floating]:
     -------
     centroid : ndarray
         Centroid coordinates (x, y).
+
+    Examples
+    --------
+    >>> polygon = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
+    >>> centroid = polygon_centroid(polygon)
+    >>> np.allclose(centroid, [0.5, 0.5])
+    True
     """
     vertices = np.asarray(vertices, dtype=np.float64)
 
@@ -274,6 +288,25 @@ def line_plane_intersection(
     -------
     intersection : ndarray or None
         Intersection point, or None if line is parallel to plane.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.geometry import line_plane_intersection
+    >>> # Line: passes through origin with direction (0, 0, 1) [vertical]
+    >>> line_point = np.array([0.0, 0.0, 0.0])
+    >>> line_dir = np.array([0.0, 0.0, 1.0])
+    >>> # Plane: z = 5, normal is (0, 0, 1)
+    >>> plane_point = np.array([0.0, 0.0, 5.0])
+    >>> plane_normal = np.array([0.0, 0.0, 1.0])
+    >>> intersection = line_plane_intersection(line_point, line_dir, plane_point, plane_normal)
+    >>> np.allclose(intersection, [0, 0, 5])
+    True
+    >>> # Parallel case: line and plane parallel, no intersection
+    >>> line_dir_parallel = np.array([1.0, 0.0, 0.0])
+    >>> intersection = line_plane_intersection(line_point, line_dir_parallel, plane_point, plane_normal)
+    >>> intersection is None
+    True
     """
     line_point = np.asarray(line_point, dtype=np.float64)
     line_dir = np.asarray(line_dir, dtype=np.float64)
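Both doctest cases follow from the underlying algebra: substituting the parametric line p = l0 + t·d into the plane equation n · (p − p0) = 0 gives t = n · (p0 − l0) / (n · d), and a zero denominator means the line is parallel to (or contained in) the plane. A standalone sketch that mirrors, but does not copy, the package's logic:

# Illustrative sketch of the line-plane intersection algebra.
import numpy as np

def line_plane_intersection_sketch(l0, d, p0, n, eps=1e-12):
    denom = np.dot(n, d)
    if abs(denom) < eps:          # direction orthogonal to the normal
        return None               # parallel (or contained) line
    t = np.dot(n, p0 - l0) / denom
    return l0 + t * d

l0 = np.array([0.0, 0.0, 0.0]); d = np.array([0.0, 0.0, 1.0])
p0 = np.array([0.0, 0.0, 5.0]); n = np.array([0.0, 0.0, 1.0])
assert np.allclose(line_plane_intersection_sketch(l0, d, p0, n), [0, 0, 5])
assert line_plane_intersection_sketch(l0, np.array([1.0, 0.0, 0.0]), p0, n) is None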
@@ -185,6 +185,21 @@ def pchip(
     p : PchipInterpolator
         PCHIP interpolator object.
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.interpolation import pchip
+    >>> # Monotonic data: stock prices (should not overshoot)
+    >>> x = np.array([0, 1, 2, 3, 4])
+    >>> y = np.array([10, 12, 11, 15, 18])  # Non-monotonic but generally increasing
+    >>> p = pchip(x, y)
+    >>> # Evaluate at intermediate points
+    >>> x_new = np.array([0.5, 1.5, 2.5])
+    >>> y_new = p(x_new)
+    >>> # PCHIP preserves bounds: should stay within observed values
+    >>> np.all((y_new >= y.min()) & (y_new <= y.max()))
+    True
+
     Notes
     -----
     Unlike cubic splines, PCHIP will not overshoot if the data is
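Since the function returns a scipy PchipInterpolator, the no-overshoot claim in the Notes can be demonstrated against a plain cubic spline using scipy directly; on step-like monotone data the spline overshoots past the data range while PCHIP stays within it:

# Overshoot comparison on step-like monotone data, using scipy directly.
import numpy as np
from scipy.interpolate import CubicSpline, PchipInterpolator

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.0, 0.0, 1.0, 1.0])   # monotone, step-like data
xs = np.linspace(0, 3, 301)

spline_max = CubicSpline(x, y)(xs).max()
pchip_max = PchipInterpolator(x, y)(xs).max()

print(f"cubic spline max: {spline_max:.3f}")  # > 1.0: overshoots
print(f"pchip max:        {pchip_max:.3f}")   # stays at 1.0
assert pchip_max <= 1.0 + 1e-12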
@@ -222,6 +237,21 @@ def akima(
     a : Akima1DInterpolator
         Akima interpolator object.
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.interpolation import akima
+    >>> # Noisy data where smoothness without oscillation is desired
+    >>> x = np.array([0, 1, 2, 3, 4, 5])
+    >>> y = np.array([0, 1, 1.5, 1.2, 2.0, 2.5])  # Noisy measurements
+    >>> a = akima(x, y)
+    >>> # Evaluate at intermediate points
+    >>> x_new = np.array([0.5, 1.5, 3.5])
+    >>> y_new = a(x_new)
+    >>> # Akima should produce smooth, non-oscillating results
+    >>> y_new.shape
+    (3,)
+
     See Also
     --------
     scipy.interpolate.Akima1DInterpolator : Underlying implementation.
@@ -307,6 +337,23 @@ def interp3d(
     f : RegularGridInterpolator
         Interpolation function.
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.interpolation import interp3d
+    >>> # Create a 3D grid: temperature field
+    >>> x = np.array([0, 1, 2])
+    >>> y = np.array([0, 1, 2])
+    >>> z = np.array([0, 1])
+    >>> # Temperature values at grid points (3x3x2)
+    >>> values = np.arange(18).reshape((3, 3, 2), order='C').astype(float)
+    >>> f = interp3d(x, y, z, values, kind='linear')
+    >>> # Interpolate at intermediate points
+    >>> pts = np.array([[0.5, 0.5, 0.5], [1.5, 1.5, 0.5]])
+    >>> result = f(pts)
+    >>> result.shape
+    (2,)
+
     See Also
     --------
     scipy.interpolate.RegularGridInterpolator : Underlying implementation.
@@ -397,6 +444,23 @@ def barycentric(
     p : BarycentricInterpolator
         Barycentric interpolator object.
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.interpolation import barycentric
+    >>> # Interpolate a polynomial at Chebyshev nodes (most stable)
+    >>> n = 5
+    >>> x = np.cos(np.linspace(0, np.pi, n))  # Chebyshev nodes
+    >>> y = x**2 + 2*x + 1  # Quadratic function
+    >>> poly = barycentric(x, y)
+    >>> # Evaluate interpolant at new points
+    >>> x_new = np.linspace(-0.8, 0.8, 3)
+    >>> y_interp = poly(x_new)
+    >>> # Should match the original function well
+    >>> y_exact = x_new**2 + 2*x_new + 1
+    >>> np.allclose(y_interp, y_exact, atol=0.01)
+    True
+
     See Also
     --------
     scipy.interpolate.BarycentricInterpolator : Underlying implementation.
@@ -427,6 +491,25 @@ def krogh(
     k : KroghInterpolator
         Krogh interpolator object.
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.interpolation import krogh
+    >>> # Hermite interpolation with function values and derivatives
+    >>> x = np.array([0, 1, 2])
+    >>> # For Hermite interpolation, y rows are: function values, then derivatives
+    >>> y = np.array([
+    ...     [1, 2, 3],  # Function values at x=0, 1, 2
+    ...     [0, 1, 2],  # Derivatives at x=0, 1, 2
+    ... ])
+    >>> k = krogh(x, y)
+    >>> # Evaluate interpolant at new points
+    >>> x_new = np.array([0.5, 1.5])
+    >>> y_interp = k(x_new)
+    >>> # Interpolant passes through original points and matches derivatives
+    >>> np.allclose(k(x), y[0])
+    True
+
     See Also
     --------
     scipy.interpolate.KroghInterpolator : Underlying implementation.
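For context, scipy's own KroghInterpolator encodes derivative constraints by repeating abscissae: at a repeated xi, successive yi entries are the value, then the first derivative, and so on. A sketch of how the stacked-row layout in the doctest above could map onto that convention (an assumption about the wrapper, not a copy of pytcl's code):

# Assumed mapping from stacked rows onto scipy's repeated-abscissae layout.
import numpy as np
from scipy.interpolate import KroghInterpolator

x = np.array([0.0, 1.0, 2.0])
values = np.array([1.0, 2.0, 3.0])
derivs = np.array([0.0, 1.0, 2.0])

# Repeat each abscissa once per constraint: [x0, x0, x1, x1, x2, x2]
xi = np.repeat(x, 2)
# Interleave value/derivative pairs to match: [f(x0), f'(x0), f(x1), ...]
yi = np.column_stack([values, derivs]).ravel()

k = KroghInterpolator(xi, yi)
assert np.allclose(k(x), values)                    # hits the values
assert np.allclose(k.derivative(x, der=1), derivs)  # and the slopes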
@@ -711,6 +711,21 @@ def cfar_so(
     result : CFARResult
         Named tuple with detection results.
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.signal_processing import cfar_so
+    >>> # Create test signal with closely spaced targets in clutter
+    >>> np.random.seed(42)
+    >>> signal = np.random.exponential(1.0, 500)
+    >>> signal[200:205] = [30, 40, 35, 45, 38]  # Target cluster
+    >>> signal[350] = 50  # Isolated target
+    >>> # Detect using SO-CFAR
+    >>> result = cfar_so(signal, guard_cells=2, ref_cells=16, pfa=1e-4)
+    >>> # SO-CFAR good for clutter edge detection
+    >>> len(result.detection_indices) >= 2  # Should find multiple targets
+    True
+
     Notes
     -----
     SO-CFAR is complementary to GO-CFAR. It is more sensitive near
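The smallest-of rule itself is compact: average the leading and lagging reference windows separately, take the smaller mean as the noise estimate, and compare the cell under test against a scaled version of it. A standalone sketch, with the threshold multiplier alpha as an illustrative stand-in for the pfa-derived factor:

# Illustrative SO-CFAR sketch; alpha is an assumed stand-in for the
# scale factor that a real implementation derives from pfa.
import numpy as np

def cfar_so_sketch(x, guard_cells, ref_cells, alpha):
    n = len(x)
    hits = np.zeros(n, dtype=bool)
    for i in range(ref_cells + guard_cells, n - ref_cells - guard_cells):
        lead = x[i - guard_cells - ref_cells : i - guard_cells]
        lag = x[i + guard_cells + 1 : i + guard_cells + ref_cells + 1]
        noise = min(lead.mean(), lag.mean())  # "smallest of" the two windows
        hits[i] = x[i] > alpha * noise
    return np.flatnonzero(hits)

rng = np.random.default_rng(42)
signal = rng.exponential(1.0, 500)
signal[350] = 50.0                    # isolated target
print(cfar_so_sketch(signal, guard_cells=2, ref_cells=16, alpha=12.0))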
@@ -966,6 +981,22 @@ def cluster_detections(
     -------
     peak_indices : ndarray
         Indices of detection peaks after clustering.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.signal_processing import cluster_detections
+    >>> # CFAR detection result with closely spaced detections
+    >>> detections = np.zeros(100, dtype=bool)
+    >>> detections[20:24] = True  # Cluster 1 (4 adjacent detections)
+    >>> detections[60] = True  # Cluster 2 (single detection)
+    >>> detections[62] = True  # Close to cluster 2
+    >>> # Cluster with min_separation=1 (adjacent counts as same cluster)
+    >>> peaks = cluster_detections(detections, min_separation=1)
+    >>> len(peaks)  # Should find 2 clusters
+    2
+    >>> peaks[0]  # Center of first cluster (indices 20-23)
+    21
     """
     detections = np.asarray(detections)
 
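The expected outputs above pin down the clustering rule: detections merge into one cluster when at most min_separation empty cells separate them (so indices 60 and 62 merge), and each cluster reports its integer centre. A sketch of that inferred rule, not a copy of pytcl's implementation:

# Clustering rule inferred from the doctest's expected outputs.
import numpy as np

def cluster_detections_sketch(detections, min_separation=1):
    idx = np.flatnonzero(detections)
    if idx.size == 0:
        return np.array([], dtype=int)
    # Split where the gap leaves more than min_separation empty cells
    breaks = np.flatnonzero(np.diff(idx) > min_separation + 1) + 1
    clusters = np.split(idx, breaks)
    return np.array([int(np.mean(c)) for c in clusters])

detections = np.zeros(100, dtype=bool)
detections[20:24] = True   # indices 20..23 -> centre int(21.5) == 21
detections[60] = True
detections[62] = True      # one empty cell between -> same cluster
peaks = cluster_detections_sketch(detections, min_separation=1)
assert len(peaks) == 2 and peaks[0] == 21   # matches the doctest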
@@ -810,6 +810,22 @@ def sos_to_zpk(sos: ArrayLike) -> tuple[NDArray[Any], NDArray[Any], Any]:
         Poles.
     k : float
         Gain.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.signal_processing import (
+    ...     sos_to_zpk, butter_sos
+    ... )
+    >>> # Design a Butterworth filter and convert to ZPK form
+    >>> sos = butter_sos(4, 0.3)  # 4th order Butterworth lowpass
+    >>> z, p, k = sos_to_zpk(sos)
+    >>> # Check filter stability: poles must be inside unit circle
+    >>> np.all(np.abs(p) < 1.0)
+    True
+    >>> # Verify number of poles matches filter order
+    >>> len(p) == 4
+    True
     """
     return scipy_signal.sos2zpk(np.asarray(sos))
 
@@ -835,5 +851,45 @@ def zpk_to_sos(
     -------
     sos : ndarray
         Second-order sections array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.signal_processing import (
+    ...     zpk_to_sos
+    ... )
+    >>> # Create a simple 2nd order Butterworth-like filter
+    >>> z = np.array([-1.0, -1.0])  # Zeros at -1
+    >>> p = np.array([0.7071 * np.exp(1j * np.pi / 4),
+    ...               0.7071 * np.exp(-1j * np.pi / 4)])  # Complex poles
+    >>> k = 1.0
+    >>> sos = zpk_to_sos(z, p, k)
+    >>> sos.shape
+    (1, 6)
+    >>> # Verify roundtrip conversion
+    >>> from pytcl.mathematical_functions.signal_processing import sos_to_zpk
+    >>> z2, p2, k2 = sos_to_zpk(sos)
+    >>> np.allclose(z, z2) and np.allclose(p, p2)
+    True
     """
     return scipy_signal.zpk2sos(np.asarray(z), np.asarray(p), k, pairing=pairing)
+
+
+__all__ = [
+    "FilterCoefficients",
+    "FrequencyResponse",
+    "butter_design",
+    "cheby1_design",
+    "cheby2_design",
+    "ellip_design",
+    "bessel_design",
+    "fir_design",
+    "fir_design_remez",
+    "apply_filter",
+    "filtfilt",
+    "frequency_response",
+    "group_delay",
+    "filter_order",
+    "sos_to_zpk",
+    "zpk_to_sos",
+]
@@ -273,12 +273,23 @@ def optimal_filter(
     Examples
     --------
     >>> import numpy as np
+    >>> # White noise case (simple matched filter)
     >>> signal = np.random.randn(256)
     >>> template = np.ones(16)
-    >>> noise_psd = np.ones(256)  # White noise
+    >>> noise_psd = np.ones(256)  # White noise (flat PSD)
     >>> output = optimal_filter(signal, template, noise_psd)
     >>> len(output) == len(signal)
     True
+    >>> # Colored noise case (Wiener filtering optimal)
+    >>> # Create signal with target embedded in colored noise
+    >>> target = np.array([1, 2, 3, 2, 1])
+    >>> noise_freq = np.linspace(0, 1, 256)
+    >>> colored_noise_psd = 1.0 + 2.0 * np.exp(-5 * noise_freq)  # Red noise
+    >>> colored_noise = np.random.randn(256) * np.sqrt(colored_noise_psd)
+    >>> signal = np.concatenate([colored_noise, target, colored_noise])
+    >>> output = optimal_filter(signal, target, colored_noise_psd)
+    >>> len(output) == len(signal)
+    True
 
     Notes
     -----
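The white/colored split in this example reflects the classical result that the optimal (whitened matched) filter in the frequency domain is H(f) = conj(T(f)) / S_nn(f), which collapses to the plain matched filter when the noise PSD is flat. A standalone sketch; pytcl's normalisation may differ:

# Illustrative whitened matched filter in the frequency domain.
import numpy as np

def whitened_matched_filter(signal, template, noise_psd):
    n = len(signal)
    S = np.fft.fft(signal)
    T = np.fft.fft(template, n)     # zero-pad template to signal length
    H = np.conj(T) / noise_psd      # de-emphasise noisy frequency bins
    return np.real(np.fft.ifft(S * H))

rng = np.random.default_rng(0)
signal = rng.standard_normal(256)
template = np.ones(16)
flat_psd = np.ones(256)             # white noise: reduces to matched filter
out = whitened_matched_filter(signal, template, flat_psd)
assert len(out) == len(signal)      # same length, as in the doctest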
@@ -751,6 +762,26 @@ def cross_ambiguity(
         Doppler frequency values in Hz.
     caf : ndarray
         Cross-ambiguity function (2D, complex).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.signal_processing import (
+    ...     cross_ambiguity, generate_lfm_chirp
+    ... )
+    >>> # Generate two LFM chirps for correlation analysis
+    >>> fs = 10000  # 10 kHz sampling
+    >>> signal1 = generate_lfm_chirp(0.001, 1000, 2000, fs)  # 1 ms chirp
+    >>> signal2 = generate_lfm_chirp(0.001, 1000, 2000, fs)  # Identical chirp
+    >>> # Compute cross-ambiguity function
+    >>> delays, dopplers, caf = cross_ambiguity(
+    ...     signal1, signal2, fs, n_delay=64, n_doppler=64
+    ... )
+    >>> # Auto-correlation should have peak near zero delay/Doppler
+    >>> caf.shape
+    (64, 64)
+    >>> np.max(np.abs(caf)) > 0.9  # High correlation for identical signals
+    True
     """
     signal1 = np.asarray(signal1, dtype=np.complex128)
     signal2 = np.asarray(signal2, dtype=np.complex128)
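The quantity being computed is χ(τ, ν) = Σ_t s1(t) · conj(s2(t − τ)) · exp(−j2πνt): correlate over delay at each trial Doppler shift. A direct (slow) reference implementation makes the definition concrete; pytcl's version is presumably FFT-based:

# Direct-evaluation reference for the cross-ambiguity function.
import numpy as np

def caf_direct(s1, s2, fs, delays, dopplers):
    t = np.arange(len(s1)) / fs
    caf = np.zeros((len(delays), len(dopplers)), dtype=complex)
    for i, tau in enumerate(delays):
        shift = int(round(tau * fs))
        s2_delayed = np.roll(s2, shift)       # circular delay approximation
        for j, nu in enumerate(dopplers):
            caf[i, j] = np.sum(s1 * np.conj(s2_delayed) * np.exp(-2j * np.pi * nu * t))
    return caf

fs = 1000.0
t = np.arange(256) / fs
s = np.exp(2j * np.pi * 100 * t)              # 100 Hz tone
caf = caf_direct(s, s, fs, delays=np.array([0.0]),
                 dopplers=np.array([-50.0, 0.0, 50.0]))
assert np.argmax(np.abs(caf[0])) == 1         # peak at zero Doppler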
@@ -311,6 +311,23 @@ def hyp1f1_regularized(
     F : ndarray
         Values of 1F1(a; b; z) / Gamma(b).
 
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.special_functions import hyp1f1_regularized
+    >>> # Regularized form avoids overflow for problematic b values
+    >>> a, b, z = 0.5, 1.5, 1.0
+    >>> f_reg = hyp1f1_regularized(a, b, z)
+    >>> # Should give finite, non-overflowing result
+    >>> np.isfinite(f_reg)
+    True
+    >>> # Compare to regular hypergeometric computation
+    >>> from pytcl.mathematical_functions.special_functions import hyp1f1
+    >>> import scipy.special as sp
+    >>> f_normal = hyp1f1(a, b, z) / sp.gamma(b)
+    >>> np.allclose(f_reg, f_normal)
+    True
+
     Notes
     -----
     This function remains finite even when b is a non-positive integer,
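The "remains finite" property follows from the series: in M̃(a, b, z) = Σ_k (a)_k z^k / (Γ(b + k) k!), the factor 1/Γ(b + k) vanishes wherever b + k is a non-positive integer, so Gamma's poles simply zero out the offending terms. A direct-series sketch using scipy's reciprocal gamma (suitable for small z only):

# Direct series for the regularized 1F1; finite even at non-positive b.
import numpy as np
from scipy.special import rgamma  # reciprocal gamma: 1/Gamma, zero at poles

def hyp1f1_regularized_series(a, b, z, terms=60):
    total, poch, fact = 0.0, 1.0, 1.0   # poch = (a)_k, fact = k!
    for k in range(terms):
        total += poch * z**k * rgamma(b + k) / fact
        poch *= a + k
        fact *= k + 1
    return total

# Finite at b = 0, where 1F1 itself is undefined
print(hyp1f1_regularized_series(0.5, 0.0, 1.0))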
@@ -68,6 +68,13 @@ def weighted_var(
     -------
     var : ndarray
         Weighted variance.
+
+    Examples
+    --------
+    >>> x = [1, 2, 3]
+    >>> weights = [1, 1, 2]
+    >>> weighted_var(x, weights)
+    0.5625
     """
     x = np.asarray(x, dtype=np.float64)
     weights = np.asarray(weights, dtype=np.float64)
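Weighted variance admits several normalisations, and the numerical result depends on which one an implementation picks. For reference, the three textbook variants on this doctest's inputs (without asserting which convention pytcl uses):

# The three common weighted-variance conventions, for x=[1,2,3], w=[1,1,2].
import numpy as np

x = np.array([1.0, 2.0, 3.0])
w = np.array([1.0, 1.0, 2.0])

mu = np.sum(w * x) / np.sum(w)               # weighted mean = 2.25
s = np.sum(w * (x - mu) ** 2)

biased = s / np.sum(w)                        # population form
freq_unbiased = s / (np.sum(w) - 1)           # integer frequency weights
rel_unbiased = s / (np.sum(w) - np.sum(w**2) / np.sum(w))  # reliability weights

print(biased, freq_unbiased, rel_unbiased)    # 0.6875 0.9166... 1.1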
@@ -106,6 +113,14 @@ def weighted_cov(
     -------
     cov : ndarray
         Weighted covariance matrix of shape (n_features, n_features).
+
+    Examples
+    --------
+    >>> x = [[1, 2], [2, 3], [3, 4]]
+    >>> weights = [1, 1, 1]
+    >>> cov = weighted_cov(x, weights)
+    >>> cov.shape
+    (2, 2)
     """
     x = np.asarray(x, dtype=np.float64)
     weights = np.asarray(weights, dtype=np.float64)
@@ -173,6 +188,12 @@ def sample_var(
     -------
     var : ndarray
         Sample variance.
+
+    Examples
+    --------
+    >>> x = [1, 2, 3, 4, 5]
+    >>> sample_var(x)
+    2.5
     """
     return np.var(x, ddof=ddof, axis=axis, dtype=np.float64)
 
@@ -198,6 +219,13 @@ def sample_cov(
     -------
     cov : ndarray
         Covariance matrix.
+
+    Examples
+    --------
+    >>> x = [[1, 2], [2, 3], [3, 4]]
+    >>> cov = sample_cov(x)
+    >>> cov.shape
+    (2, 2)
     """
     x = np.asarray(x, dtype=np.float64)
 
@@ -224,6 +252,15 @@ def sample_corr(x: ArrayLike) -> NDArray[np.floating]:
     -------
     corr : ndarray
         Correlation matrix of shape (n_features, n_features).
+
+    Examples
+    --------
+    >>> x = [[1, 2], [2, 3], [3, 4]]
+    >>> corr = sample_corr(x)
+    >>> corr.shape
+    (2, 2)
+    >>> corr[0, 0]  # Correlation of feature 1 with itself
+    1.0
     """
     return np.corrcoef(np.asarray(x, dtype=np.float64).T)
 
@@ -246,6 +283,13 @@ def median(
     -------
     med : ndarray
         Median value(s).
+
+    Examples
+    --------
+    >>> median([1, 2, 3, 4, 5])
+    3.0
+    >>> median([1, 2, 3, 4])
+    2.5
     """
     return np.median(x, axis=axis)
 
@@ -275,6 +319,11 @@ def mad(
     mad : ndarray
         MAD value(s).
 
+    Examples
+    --------
+    >>> mad([1, 2, 3, 4, 5])
+    1.4826
+
     Notes
     -----
     For normally distributed data, scale * MAD approximates the
@@ -303,6 +352,11 @@ def iqr(
     -------
     iqr : ndarray
         Interquartile range (Q3 - Q1).
+
+    Examples
+    --------
+    >>> iqr([1, 2, 3, 4, 5, 6, 7, 8, 9])
+    4.5
     """
     x = np.asarray(x, dtype=np.float64)
     q75, q25 = np.percentile(x, [75, 25], axis=axis)
@@ -330,6 +384,11 @@ def skewness(
     -------
     skew : ndarray
         Skewness value(s).
+
+    Examples
+    --------
+    >>> skewness([1, 2, 3, 4, 5])
+    0.0
     """
     from scipy.stats import skew as scipy_skew
 
@@ -361,6 +420,11 @@ def kurtosis(
     -------
     kurt : ndarray
         Kurtosis value(s).
+
+    Examples
+    --------
+    >>> kurtosis([1, 2, 3, 4, 5])
+    -1.2
     """
     from scipy.stats import kurtosis as scipy_kurtosis
 
@@ -393,6 +457,13 @@ def moment(
     -------
     m : ndarray
         Moment value(s).
+
+    Examples
+    --------
+    >>> moment([1, 2, 3, 4, 5], order=2)
+    2.0
+    >>> moment([1, 2, 3, 4, 5], order=2, central=False)
+    11.0
     """
     from scipy.stats import moment as scipy_moment
 
@@ -816,6 +816,31 @@ def threshold_coefficients(
     -------
     result : DWTResult
         Thresholded coefficients.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.mathematical_functions.transforms import dwt, threshold_coefficients, idwt
+    >>> # Create noisy signal
+    >>> t = np.linspace(0, 1, 256)
+    >>> signal = np.sin(2 * np.pi * 5 * t)
+    >>> noise = 0.5 * np.random.randn(256)
+    >>> noisy_signal = signal + noise
+    >>> # Denoise using wavelet thresholding
+    >>> coeffs = dwt(noisy_signal, wavelet='db4', level=3)
+    >>> # Apply soft threshold (default automatic threshold value)
+    >>> coeffs_denoised = threshold_coefficients(coeffs, threshold='soft')
+    >>> # Reconstruct signal from thresholded coefficients
+    >>> signal_denoised = idwt(coeffs_denoised)
+    >>> len(signal_denoised) == len(noisy_signal)
+    True
+
+    Notes
+    -----
+    When value is None, the universal threshold is computed as:
+        sigma * sqrt(2 * log(n))
+    where sigma is estimated from the finest detail coefficients
+    and n is the total number of coefficients.
     """
     if not PYWT_AVAILABLE:
         raise ImportError(
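The universal threshold in the Notes is straightforward to sketch: estimate sigma from the finest-scale detail coefficients via the median absolute deviation (divided by 0.6745, the MAD of a standard normal), then shrink with soft thresholding. Standalone NumPy, independent of pywt:

# Universal threshold and soft shrinkage, per the Notes above.
import numpy as np

def universal_threshold(finest_detail, n_total):
    sigma = np.median(np.abs(finest_detail)) / 0.6745   # robust noise scale
    return sigma * np.sqrt(2.0 * np.log(n_total))

def soft_threshold(c, thr):
    # Shrink toward zero: sign(c) * max(|c| - thr, 0)
    return np.sign(c) * np.maximum(np.abs(c) - thr, 0.0)

rng = np.random.default_rng(1)
detail = 0.5 * rng.standard_normal(128)      # noise-dominated fine scale
thr = universal_threshold(detail, n_total=256)
denoised = soft_threshold(detail, thr)
print(f"threshold = {thr:.3f}")              # ~0.5 * sqrt(2 ln 256) ~ 1.67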
@@ -901,6 +901,19 @@ def clear_great_circle_cache() -> None:
 
     This can be useful to free memory after processing large datasets
     or when cache statistics are being monitored.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.navigation import great_circle_distance, clear_great_circle_cache
+    >>> # Compute a few distances (cached)
+    >>> d1 = great_circle_distance(0, 0, np.radians(1), 0)
+    >>> d2 = great_circle_distance(0, 0, np.radians(2), 0)
+    >>> # Check cache state before clear
+    >>> cache_before = great_circle_distance.__wrapped__.__self__.cache_info()
+    >>> # Clear all cached values
+    >>> clear_great_circle_cache()
+    >>> # Cache is now empty
     """
     _gc_distance_cached.cache_clear()
     _gc_azimuth_cached.cache_clear()
@@ -914,6 +927,26 @@ def get_cache_info() -> dict[str, Any]:
     -------
     dict[str, Any]
         Dictionary with cache statistics for distance and azimuth caches.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from pytcl.navigation import great_circle_distance, get_cache_info, clear_great_circle_cache
+    >>> # Clear cache first
+    >>> clear_great_circle_cache()
+    >>> # Compute some distances (multiple calls to test cache hits)
+    >>> d1 = great_circle_distance(0, 0, np.radians(1), 0)
+    >>> d1_again = great_circle_distance(0, 0, np.radians(1), 0)  # Cache hit
+    >>> # Get cache statistics
+    >>> info = get_cache_info()
+    >>> info['distance']['hits'] > 0  # Should have at least one hit
+    True
+    >>> info['distance']['currsize'] > 0  # Cache is not empty
+    True
+
+    See Also
+    --------
+    clear_great_circle_cache : Clear all cached values.
     """
     return {
         "distance": _gc_distance_cached.cache_info()._asdict(),