nrl-tracker 1.2.0__py3-none-any.whl → 1.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,11 +8,12 @@ and guarantees positive semi-definiteness of the covariance.
 
  Implementations include:
  - Square-root Kalman filter (Cholesky-based)
- - U-D factorization filter (Bierman's method)
- - Square-root versions of UKF and CKF
+
+ For U-D factorization filters, see :mod:`pytcl.dynamic_estimation.kalman.ud_filter`.
+ For square-root UKF, see :mod:`pytcl.dynamic_estimation.kalman.sr_ukf`.
  """
 
- from typing import Callable, NamedTuple, Optional
+ from typing import NamedTuple, Optional
 
  import numpy as np
  import scipy.linalg
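Note: per the revised module docstring, the U-D and square-root UKF implementations now live in dedicated submodules. A minimal import sketch; only the module paths and function names below are taken from this diff, and the same names remain importable from this module via the re-export block added further down:

    from pytcl.dynamic_estimation.kalman.ud_filter import ud_factorize, ud_reconstruct
    from pytcl.dynamic_estimation.kalman.sr_ukf import sr_ukf_predict, sr_ukf_update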
@@ -76,27 +77,6 @@ class SRKalmanUpdate(NamedTuple):
      likelihood: float
 
 
- class UDState(NamedTuple):
-     """State of a U-D factorization filter.
-
-     The covariance is represented as P = U @ D @ U.T where U is
-     unit upper triangular and D is diagonal.
-
-     Attributes
-     ----------
-     x : ndarray
-         State estimate.
-     U : ndarray
-         Unit upper triangular factor.
-     D : ndarray
-         Diagonal elements (1D array).
-     """
-
-     x: NDArray[np.floating]
-     U: NDArray[np.floating]
-     D: NDArray[np.floating]
-
-
  def cholesky_update(S: NDArray, v: NDArray, sign: float = 1.0) -> NDArray:
      """
      Rank-1 Cholesky update/downdate.
@@ -126,6 +106,16 @@ def cholesky_update(S: NDArray, v: NDArray, sign: float = 1.0) -> NDArray:
      .. [1] P. E. Gill, G. H. Golub, W. Murray, and M. A. Saunders,
         "Methods for modifying matrix factorizations,"
         Mathematics of Computation, vol. 28, pp. 505-535, 1974.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> S = np.linalg.cholesky(np.eye(2))
+     >>> v = np.array([0.5, 0.5])
+     >>> S_updated = cholesky_update(S, v, sign=1.0)
+     >>> P_updated = S_updated @ S_updated.T
+     >>> np.allclose(P_updated, np.eye(2) + np.outer(v, v))
+     True
      """
      S = np.asarray(S, dtype=np.float64).copy()
      v = np.asarray(v, dtype=np.float64).flatten().copy()
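The new doctest exercises the rank-1 update (sign=1.0). For the downdate branch, a companion sketch, assuming the downdated matrix stays positive definite and that the downdate mirrors the documented update (values are illustrative):

    import numpy as np

    S = np.linalg.cholesky(2.0 * np.eye(2))    # factor of a matrix that dominates v @ v.T
    v = np.array([0.5, 0.5])
    S_down = cholesky_update(S, v, sign=-1.0)  # rank-1 downdate
    # Expected: S_down @ S_down.T close to 2.0 * np.eye(2) - np.outer(v, v)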
@@ -183,6 +173,14 @@ def qr_update(S_x: NDArray, S_noise: NDArray, F: Optional[NDArray] = None) -> ND
      -----
      Uses QR decomposition for numerical stability. The compound matrix
      [F @ S_x, S_noise].T is QR decomposed, and R.T gives the new Cholesky factor.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> S_x = np.linalg.cholesky(np.eye(2) * 0.1)
+     >>> S_noise = np.linalg.cholesky(np.eye(2) * 0.01)
+     >>> F = np.array([[1, 1], [0, 1]])
+     >>> S_new = qr_update(S_x, S_noise, F)
      """
      S_x = np.asarray(S_x, dtype=np.float64)
      S_noise = np.asarray(S_noise, dtype=np.float64)
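Given the Notes above, the returned factor should satisfy S_new @ S_new.T = F @ P @ F.T + Q. A short verification sketch reusing the doctest's quantities (illustrative only):

    import numpy as np

    P = np.eye(2) * 0.1
    Q = np.eye(2) * 0.01
    F = np.array([[1.0, 1.0], [0.0, 1.0]])
    S_new = qr_update(np.linalg.cholesky(P), np.linalg.cholesky(Q), F)
    # S_new @ S_new.T reproduces the predicted covariance F @ P @ F.T + Q
    print(np.allclose(S_new @ S_new.T, F @ P @ F.T + Q))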
@@ -424,559 +422,42 @@ def srkf_predict_update(
      -------
      result : SRKalmanUpdate
          Updated state and Cholesky factor.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> x = np.array([0.0, 1.0])
+     >>> S = np.linalg.cholesky(np.eye(2) * 0.1)
+     >>> F = np.array([[1, 1], [0, 1]])
+     >>> S_Q = np.linalg.cholesky(np.eye(2) * 0.01)
+     >>> H = np.array([[1, 0]])
+     >>> S_R = np.linalg.cholesky(np.array([[0.1]]))
+     >>> z = np.array([1.05])
+     >>> result = srkf_predict_update(x, S, z, F, S_Q, H, S_R)
      """
      pred = srkf_predict(x, S, F, S_Q, B, u)
      return srkf_update(pred.x, pred.S, z, H, S_R)
 
 
  # =============================================================================
- # U-D Factorization Filter (Bierman's Method)
- # =============================================================================
-
-
- def ud_factorize(P: ArrayLike) -> tuple[NDArray, NDArray]:
-     """
-     Compute U-D factorization of a symmetric positive definite matrix.
-
-     Decomposes P = U @ D @ U.T where U is unit upper triangular and D is diagonal.
-
-     Parameters
-     ----------
-     P : array_like
-         Symmetric positive definite matrix, shape (n, n).
-
-     Returns
-     -------
-     U : ndarray
-         Unit upper triangular matrix.
-     D : ndarray
-         Diagonal elements (1D array).
-
-     Notes
-     -----
-     The U-D factorization is equivalent to a modified Cholesky decomposition
-     and requires only n(n+1)/2 storage elements.
-     """
-     P = np.asarray(P, dtype=np.float64).copy()  # Make a copy to avoid modifying input
-     n = P.shape[0]
-
-     U = np.eye(n)
-     D = np.zeros(n)
-
-     for j in range(n - 1, -1, -1):
-         D[j] = P[j, j]
-         if D[j] > 0:
-             alpha = 1.0 / D[j]
-             for k in range(j):
-                 U[k, j] = P[k, j] * alpha
-             for i in range(j):
-                 for k in range(i + 1):
-                     P[k, i] = P[k, i] - U[k, j] * D[j] * U[i, j]
-
-     return U, D
-
-
- def ud_reconstruct(U: ArrayLike, D: ArrayLike) -> NDArray:
-     """
-     Reconstruct covariance matrix from U-D factors.
-
-     Parameters
-     ----------
-     U : array_like
-         Unit upper triangular matrix.
-     D : array_like
-         Diagonal elements.
-
-     Returns
-     -------
-     P : ndarray
-         Covariance matrix P = U @ diag(D) @ U.T.
-     """
-     U = np.asarray(U, dtype=np.float64)
-     D = np.asarray(D, dtype=np.float64)
-     return U @ np.diag(D) @ U.T
-
-
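Note: ud_factorize and ud_reconstruct move to pytcl.dynamic_estimation.kalman.ud_filter (see the re-export block later in this diff). A minimal round-trip sketch, assuming the moved functions behave as the docstrings above describe (the example matrix is illustrative):

    import numpy as np
    from pytcl.dynamic_estimation.kalman.ud_filter import ud_factorize, ud_reconstruct

    P = np.array([[4.0, 1.0],
                  [1.0, 2.0]])                   # symmetric positive definite
    U, D = ud_factorize(P)                       # U unit upper triangular, D 1-D diagonal
    print(np.allclose(np.diag(U), 1.0))          # unit diagonal of U
    print(np.allclose(ud_reconstruct(U, D), P))  # P == U @ diag(D) @ U.T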
- def ud_predict(
-     x: ArrayLike,
-     U: ArrayLike,
-     D: ArrayLike,
-     F: ArrayLike,
-     Q: ArrayLike,
- ) -> tuple[NDArray, NDArray, NDArray]:
-     """
-     U-D filter prediction step.
-
-     Parameters
-     ----------
-     x : array_like
-         Current state estimate, shape (n,).
-     U : array_like
-         Unit upper triangular factor, shape (n, n).
-     D : array_like
-         Diagonal elements, shape (n,).
-     F : array_like
-         State transition matrix, shape (n, n).
-     Q : array_like
-         Process noise covariance, shape (n, n).
-
-     Returns
-     -------
-     x_pred : ndarray
-         Predicted state.
-     U_pred : ndarray
-         Predicted unit upper triangular factor.
-     D_pred : ndarray
-         Predicted diagonal elements.
-     """
-     x = np.asarray(x, dtype=np.float64).flatten()
-     U = np.asarray(U, dtype=np.float64)
-     D = np.asarray(D, dtype=np.float64)
-     F = np.asarray(F, dtype=np.float64)
-     Q = np.asarray(Q, dtype=np.float64)
-
-     # Predicted state
-     x_pred = F @ x
-
-     # Predicted covariance: P_pred = F @ P @ F.T + Q
-     P = ud_reconstruct(U, D)
-     P_pred = F @ P @ F.T + Q
-
-     # Ensure symmetry
-     P_pred = (P_pred + P_pred.T) / 2
-
-     # Re-factorize
-     U_pred, D_pred = ud_factorize(P_pred)
-
-     return x_pred, U_pred, D_pred
-
-
- def ud_update_scalar(
-     x: ArrayLike,
-     U: ArrayLike,
-     D: ArrayLike,
-     z: float,
-     h: ArrayLike,
-     r: float,
- ) -> tuple[NDArray, NDArray, NDArray]:
-     """
-     U-D filter scalar measurement update (Bierman's algorithm).
-
-     This is the most efficient form - for vector measurements,
-     process each component sequentially.
-
-     Parameters
-     ----------
-     x : array_like
-         Predicted state estimate, shape (n,).
-     U : array_like
-         Unit upper triangular factor, shape (n, n).
-     D : array_like
-         Diagonal elements, shape (n,).
-     z : float
-         Scalar measurement.
-     h : array_like
-         Measurement row vector, shape (n,).
-     r : float
-         Measurement noise variance.
-
-     Returns
-     -------
-     x_upd : ndarray
-         Updated state.
-     U_upd : ndarray
-         Updated unit upper triangular factor.
-     D_upd : ndarray
-         Updated diagonal elements.
-
-     Notes
-     -----
-     This implements Bierman's sequential scalar update algorithm which
-     is numerically stable and efficient for U-D filters.
-     """
-     x = np.asarray(x, dtype=np.float64).flatten()
-     U = np.asarray(U, dtype=np.float64).copy()
-     D = np.asarray(D, dtype=np.float64).copy()
-     h = np.asarray(h, dtype=np.float64).flatten()
-     n = len(x)
-
-     # f = U.T @ h
-     f = U.T @ h
-
-     # g = D * f (element-wise)
-     g = D * f
-
-     # alpha[0] = r + f[0] * g[0]
-     alpha = np.zeros(n + 1)
-     alpha[0] = r
-
-     for j in range(n):
-         alpha[j + 1] = alpha[j] + f[j] * g[j]
-
-     # Innovation
-     y = z - h @ x
-
-     # Update D and U
-     D_upd = D.copy()
-     U_upd = U.copy()
-
-     for j in range(n):
-         D_upd[j] = D[j] * alpha[j] / alpha[j + 1]
-         if j > 0:
-             gamma = g[j]
-             for i in range(j):
-                 U_upd[i, j] = U[i, j] + (gamma / alpha[j]) * (f[i] - U[i, j] * f[j])
-                 g[i] = g[i] + g[j] * U[i, j]
-
-     # Kalman gain
-     K = g / alpha[n]
-
-     # Updated state
-     x_upd = x + K * y
-
-     return x_upd, U_upd, D_upd
-
-
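Note: as the docstring above says, vector measurements are handled by applying this scalar update once per component. A sketch of that sequential pattern for a diagonal-R measurement, assuming the moved function keeps the signature shown above (state, matrices, and noise values are illustrative):

    import numpy as np
    from pytcl.dynamic_estimation.kalman.ud_filter import ud_factorize, ud_update_scalar

    x = np.array([0.0, 1.0])
    U, D = ud_factorize(np.eye(2) * 0.5)

    z = np.array([0.1, 0.9])                 # two independent scalar measurements
    H = np.eye(2)                            # one row per measurement component
    r = np.array([0.04, 0.04])               # per-component noise variances

    for i in range(len(z)):                  # Bierman updates, one row of H at a time
        x, U, D = ud_update_scalar(x, U, D, z[i], H[i, :], r[i])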
- def ud_update(
-     x: ArrayLike,
-     U: ArrayLike,
-     D: ArrayLike,
-     z: ArrayLike,
-     H: ArrayLike,
-     R: ArrayLike,
- ) -> tuple[NDArray, NDArray, NDArray, NDArray, float]:
-     """
-     U-D filter vector measurement update.
-
-     Processes measurements sequentially using scalar updates.
-
-     Parameters
-     ----------
-     x : array_like
-         Predicted state estimate, shape (n,).
-     U : array_like
-         Unit upper triangular factor, shape (n, n).
-     D : array_like
-         Diagonal elements, shape (n,).
-     z : array_like
-         Measurement vector, shape (m,).
-     H : array_like
-         Measurement matrix, shape (m, n).
-     R : array_like
-         Measurement noise covariance, shape (m, m).
-         Should be diagonal for sequential processing.
-
-     Returns
-     -------
-     x_upd : ndarray
-         Updated state.
-     U_upd : ndarray
-         Updated unit upper triangular factor.
-     D_upd : ndarray
-         Updated diagonal elements.
-     y : ndarray
-         Innovation vector.
-     likelihood : float
-         Measurement likelihood.
-
-     Notes
-     -----
-     For correlated measurement noise (non-diagonal R), the measurements
-     are decorrelated first using a Cholesky decomposition.
-     """
-     x = np.asarray(x, dtype=np.float64).flatten()
-     U = np.asarray(U, dtype=np.float64)
-     D = np.asarray(D, dtype=np.float64)
-     z = np.asarray(z, dtype=np.float64).flatten()
-     H = np.asarray(H, dtype=np.float64)
-     R = np.asarray(R, dtype=np.float64)
-     m = len(z)
-
-     # Full innovation before update
-     y = z - H @ x
-
-     # Check if R is diagonal
-     is_diagonal = np.allclose(R, np.diag(np.diag(R)))
-
-     if is_diagonal:
-         # Sequential scalar updates
-         x_upd = x.copy()
-         U_upd = U.copy()
-         D_upd = D.copy()
-
-         for i in range(m):
-             x_upd, U_upd, D_upd = ud_update_scalar(
-                 x_upd, U_upd, D_upd, z[i], H[i, :], R[i, i]
-             )
-     else:
-         # Decorrelate measurements
-         S_R = np.linalg.cholesky(R)
-         z_dec = scipy.linalg.solve_triangular(S_R, z, lower=True)
-         H_dec = scipy.linalg.solve_triangular(S_R, H, lower=True)
-
-         # Sequential scalar updates with unit variance
-         x_upd = x.copy()
-         U_upd = U.copy()
-         D_upd = D.copy()
-
-         for i in range(m):
-             x_upd, U_upd, D_upd = ud_update_scalar(
-                 x_upd, U_upd, D_upd, z_dec[i], H_dec[i, :], 1.0
-             )
-
-     # Compute likelihood
-     P = ud_reconstruct(U, D)
-     S_innov = H @ P @ H.T + R
-     det_S = np.linalg.det(S_innov)
-     if det_S > 0:
-         mahal_sq = y @ np.linalg.solve(S_innov, y)
-         likelihood = np.exp(-0.5 * mahal_sq) / np.sqrt((2 * np.pi) ** m * det_S)
-     else:
-         likelihood = 0.0
-
-     return x_upd, U_upd, D_upd, y, likelihood
-
-
- # =============================================================================
- # Square-Root UKF
+ # Backward compatibility: Re-export from submodules
  # =============================================================================
 
-
- def sr_ukf_predict(
-     x: ArrayLike,
-     S: ArrayLike,
-     f: Callable,
-     S_Q: ArrayLike,
-     alpha: float = 1e-3,
-     beta: float = 2.0,
-     kappa: float = 0.0,
- ) -> SRKalmanPrediction:
-     """
-     Square-root Unscented Kalman Filter prediction step.
-
-     Parameters
-     ----------
-     x : array_like
-         Current state estimate, shape (n,).
-     S : array_like
-         Lower triangular Cholesky factor of covariance, shape (n, n).
-     f : callable
-         State transition function f(x) -> x_next.
-     S_Q : array_like
-         Cholesky factor of process noise covariance.
-     alpha : float, optional
-         Spread of sigma points around mean. Default 1e-3.
-     beta : float, optional
-         Prior knowledge about distribution. Default 2.0 (Gaussian).
-     kappa : float, optional
-         Secondary scaling parameter. Default 0.0.
-
-     Returns
-     -------
-     result : SRKalmanPrediction
-         Predicted state and Cholesky factor.
-     """
-     x = np.asarray(x, dtype=np.float64).flatten()
-     S = np.asarray(S, dtype=np.float64)
-     S_Q = np.asarray(S_Q, dtype=np.float64)
-     n = len(x)
-
-     # Sigma point parameters
-     lam = alpha**2 * (n + kappa) - n
-     gamma = np.sqrt(n + lam)
-
-     # Weights
-     W_m = np.zeros(2 * n + 1)
-     W_c = np.zeros(2 * n + 1)
-     W_m[0] = lam / (n + lam)
-     W_c[0] = lam / (n + lam) + (1 - alpha**2 + beta)
-     for i in range(1, 2 * n + 1):
-         W_m[i] = 1 / (2 * (n + lam))
-         W_c[i] = 1 / (2 * (n + lam))
-
-     # Generate sigma points
-     sigma_points = np.zeros((n, 2 * n + 1))
-     sigma_points[:, 0] = x
-     for i in range(n):
-         sigma_points[:, i + 1] = x + gamma * S[:, i]
-         sigma_points[:, n + i + 1] = x - gamma * S[:, i]
-
-     # Propagate sigma points
-     sigma_points_pred = np.zeros_like(sigma_points)
-     for i in range(2 * n + 1):
-         sigma_points_pred[:, i] = f(sigma_points[:, i])
-
-     # Predicted mean
-     x_pred = np.sum(W_m * sigma_points_pred, axis=1)
-
-     # Predicted covariance square root via QR
-     # Build matrix for QR: [sqrt(W_c[1]) * (X - x_mean), S_Q]
-     residuals = sigma_points_pred[:, 1:] - x_pred[:, np.newaxis]
-     sqrt_Wc = np.sqrt(np.abs(W_c[1:]))
-     weighted_residuals = residuals * sqrt_Wc
-
-     compound = np.hstack([weighted_residuals, S_Q]).T
-     _, R = np.linalg.qr(compound)
-     S_pred = R[:n, :n].T
-
-     # Handle negative weight for mean point
-     if W_c[0] < 0:
-         # Downdate for the mean point
-         v = sigma_points_pred[:, 0] - x_pred
-         try:
-             S_pred = cholesky_update(S_pred, np.sqrt(np.abs(W_c[0])) * v, sign=-1.0)
-         except ValueError:
-             # Fall back to direct computation
-             pass
-     else:
-         v = sigma_points_pred[:, 0] - x_pred
-         S_pred = cholesky_update(S_pred, np.sqrt(W_c[0]) * v, sign=1.0)
-
-     # Ensure lower triangular with positive diagonal
-     for i in range(n):
-         if S_pred[i, i] < 0:
-             S_pred[i:, i] = -S_pred[i:, i]
-
-     return SRKalmanPrediction(x=x_pred, S=S_pred)
-
-
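Note: sr_ukf_predict now lives in pytcl.dynamic_estimation.kalman.sr_ukf (re-exported below). A minimal prediction sketch with a hand-rolled nonlinear transition, assuming the moved function keeps the signature shown above (model and numbers are illustrative):

    import numpy as np
    from pytcl.dynamic_estimation.kalman.sr_ukf import sr_ukf_predict

    def f(x):
        # Mildly nonlinear constant-velocity-style transition (illustrative only).
        return np.array([x[0] + x[1], 0.99 * x[1]])

    x = np.array([0.0, 1.0])
    S = np.linalg.cholesky(np.eye(2) * 0.1)     # Cholesky factor of the state covariance
    S_Q = np.linalg.cholesky(np.eye(2) * 0.01)  # Cholesky factor of the process noise

    pred = sr_ukf_predict(x, S, f, S_Q)         # default alpha, beta, kappa
    # pred.x is the predicted mean; pred.S its lower-triangular Cholesky factor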
- def sr_ukf_update(
-     x: ArrayLike,
-     S: ArrayLike,
-     z: ArrayLike,
-     h: Callable,
-     S_R: ArrayLike,
-     alpha: float = 1e-3,
-     beta: float = 2.0,
-     kappa: float = 0.0,
- ) -> SRKalmanUpdate:
-     """
-     Square-root Unscented Kalman Filter update step.
-
-     Parameters
-     ----------
-     x : array_like
-         Predicted state estimate, shape (n,).
-     S : array_like
-         Lower triangular Cholesky factor of covariance, shape (n, n).
-     z : array_like
-         Measurement, shape (m,).
-     h : callable
-         Measurement function h(x) -> z.
-     S_R : array_like
-         Cholesky factor of measurement noise covariance.
-     alpha, beta, kappa : float
-         UKF scaling parameters.
-
-     Returns
-     -------
-     result : SRKalmanUpdate
-         Updated state and Cholesky factor.
-     """
-     x = np.asarray(x, dtype=np.float64).flatten()
-     S = np.asarray(S, dtype=np.float64)
-     z = np.asarray(z, dtype=np.float64).flatten()
-     S_R = np.asarray(S_R, dtype=np.float64)
-     n = len(x)
-     m = len(z)
-
-     # Sigma point parameters
-     lam = alpha**2 * (n + kappa) - n
-     gamma = np.sqrt(n + lam)
-
-     # Weights
-     W_m = np.zeros(2 * n + 1)
-     W_c = np.zeros(2 * n + 1)
-     W_m[0] = lam / (n + lam)
-     W_c[0] = lam / (n + lam) + (1 - alpha**2 + beta)
-     for i in range(1, 2 * n + 1):
-         W_m[i] = 1 / (2 * (n + lam))
-         W_c[i] = 1 / (2 * (n + lam))
-
-     # Generate sigma points
-     sigma_points = np.zeros((n, 2 * n + 1))
-     sigma_points[:, 0] = x
-     for i in range(n):
-         sigma_points[:, i + 1] = x + gamma * S[:, i]
-         sigma_points[:, n + i + 1] = x - gamma * S[:, i]
-
-     # Propagate through measurement function
-     Z = np.zeros((m, 2 * n + 1))
-     for i in range(2 * n + 1):
-         Z[:, i] = h(sigma_points[:, i])
-
-     # Predicted measurement mean
-     z_pred = np.sum(W_m * Z, axis=1)
-
-     # Innovation
-     y = z - z_pred
-
-     # Innovation covariance square root via QR
-     residuals_z = Z[:, 1:] - z_pred[:, np.newaxis]
-     sqrt_Wc = np.sqrt(np.abs(W_c[1:]))
-     weighted_residuals_z = residuals_z * sqrt_Wc
-
-     compound_z = np.hstack([weighted_residuals_z, S_R]).T
-     _, R_z = np.linalg.qr(compound_z)
-     S_y = R_z[:m, :m].T
-
-     # Handle mean point weight
-     v_z = Z[:, 0] - z_pred
-     if W_c[0] >= 0:
-         S_y = cholesky_update(S_y, np.sqrt(W_c[0]) * v_z, sign=1.0)
-
-     for i in range(m):
-         if S_y[i, i] < 0:
-             S_y[i:, i] = -S_y[i:, i]
-
-     # Cross covariance
-     residuals_x = sigma_points[:, 1:] - x[:, np.newaxis]
-     P_xz = (
-         W_c[0] * np.outer(sigma_points[:, 0] - x, Z[:, 0] - z_pred)
-         + (residuals_x * W_c[1:]) @ (Z[:, 1:] - z_pred[:, np.newaxis]).T
-     )
-
-     # Kalman gain
-     K = scipy.linalg.solve_triangular(
-         S_y.T, scipy.linalg.solve_triangular(S_y, P_xz.T, lower=True), lower=False
-     ).T
-
-     # Updated state
-     x_upd = x + K @ y
-
-     # Updated covariance square root
-     S_upd = S.copy()
-     KS_y = K @ S_y
-     for j in range(m):
-         try:
-             S_upd = cholesky_update(S_upd, KS_y[:, j], sign=-1.0)
-         except ValueError:
-             # Fallback: compute directly
-             P = S_upd @ S_upd.T - np.outer(KS_y[:, j], KS_y[:, j])
-             P = (P + P.T) / 2
-             eigvals = np.linalg.eigvalsh(P)
-             if np.min(eigvals) < 0:
-                 P = P + (np.abs(np.min(eigvals)) + 1e-10) * np.eye(n)
-             S_upd = np.linalg.cholesky(P)
-
-     # Likelihood
-     det_S_y = np.prod(np.diag(S_y)) ** 2
-     if det_S_y > 0:
-         y_normalized = scipy.linalg.solve_triangular(S_y, y, lower=True)
-         mahal_sq = np.sum(y_normalized**2)
-         likelihood = np.exp(-0.5 * mahal_sq) / np.sqrt((2 * np.pi) ** m * det_S_y)
-     else:
-         likelihood = 0.0
-
-     return SRKalmanUpdate(
-         x=x_upd,
-         S=S_upd,
-         y=y,
-         S_y=S_y,
-         K=K,
-         likelihood=likelihood,
-     )
-
+ # Square-root UKF (now in sr_ukf.py)
+ from pytcl.dynamic_estimation.kalman.sr_ukf import (  # noqa: E402
+     sr_ukf_predict,
+     sr_ukf_update,
+ )
+
+ # U-D factorization filter (now in ud_filter.py)
+ from pytcl.dynamic_estimation.kalman.ud_filter import (  # noqa: E402
+     UDState,
+     ud_factorize,
+     ud_predict,
+     ud_reconstruct,
+     ud_update,
+     ud_update_scalar,
+ )
 
  __all__ = [
      # Square-root KF types
@@ -990,14 +471,14 @@ __all__ = [
      "srkf_predict",
      "srkf_update",
      "srkf_predict_update",
-     # U-D factorization
+     # U-D factorization (re-exported for backward compatibility)
      "UDState",
      "ud_factorize",
      "ud_reconstruct",
      "ud_predict",
      "ud_update_scalar",
      "ud_update",
-     # Square-root UKF
+     # Square-root UKF (re-exported for backward compatibility)
      "sr_ukf_predict",
      "sr_ukf_update",
  ]
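For orientation, a small end-to-end sketch of the square-root driver srkf_predict_update, which remains in this module; the call signature follows the doctest added above, and the measurement sequence is invented for illustration:

    import numpy as np

    F = np.array([[1.0, 1.0], [0.0, 1.0]])      # constant-velocity transition
    H = np.array([[1.0, 0.0]])                  # position-only measurement
    S_Q = np.linalg.cholesky(np.eye(2) * 0.01)
    S_R = np.linalg.cholesky(np.array([[0.1]]))

    x = np.array([0.0, 1.0])
    S = np.linalg.cholesky(np.eye(2) * 0.1)

    for z in ([1.05], [1.98], [3.1]):           # illustrative position measurements
        result = srkf_predict_update(x, S, np.array(z), F, S_Q, H, S_R)
        x, S = result.x, result.S               # SRKalmanUpdate exposes x and S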