nrl-tracker 0.22.5__py3-none-any.whl → 1.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.7.5.dist-info}/METADATA +57 -10
  2. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.7.5.dist-info}/RECORD +84 -69
  3. pytcl/__init__.py +4 -3
  4. pytcl/assignment_algorithms/__init__.py +28 -0
  5. pytcl/assignment_algorithms/gating.py +10 -10
  6. pytcl/assignment_algorithms/jpda.py +40 -40
  7. pytcl/assignment_algorithms/nd_assignment.py +379 -0
  8. pytcl/assignment_algorithms/network_flow.py +371 -0
  9. pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
  10. pytcl/astronomical/__init__.py +104 -3
  11. pytcl/astronomical/ephemerides.py +14 -11
  12. pytcl/astronomical/reference_frames.py +865 -56
  13. pytcl/astronomical/relativity.py +6 -5
  14. pytcl/astronomical/sgp4.py +710 -0
  15. pytcl/astronomical/special_orbits.py +532 -0
  16. pytcl/astronomical/tle.py +558 -0
  17. pytcl/atmosphere/__init__.py +43 -1
  18. pytcl/atmosphere/ionosphere.py +512 -0
  19. pytcl/atmosphere/nrlmsise00.py +809 -0
  20. pytcl/clustering/dbscan.py +2 -2
  21. pytcl/clustering/gaussian_mixture.py +3 -3
  22. pytcl/clustering/hierarchical.py +15 -15
  23. pytcl/clustering/kmeans.py +4 -4
  24. pytcl/containers/__init__.py +24 -0
  25. pytcl/containers/base.py +219 -0
  26. pytcl/containers/cluster_set.py +12 -2
  27. pytcl/containers/covertree.py +26 -29
  28. pytcl/containers/kd_tree.py +94 -29
  29. pytcl/containers/rtree.py +200 -1
  30. pytcl/containers/vptree.py +21 -28
  31. pytcl/coordinate_systems/conversions/geodetic.py +272 -5
  32. pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
  33. pytcl/coordinate_systems/projections/__init__.py +1 -1
  34. pytcl/coordinate_systems/projections/projections.py +2 -2
  35. pytcl/coordinate_systems/rotations/rotations.py +10 -6
  36. pytcl/core/__init__.py +18 -0
  37. pytcl/core/validation.py +333 -2
  38. pytcl/dynamic_estimation/__init__.py +26 -0
  39. pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
  40. pytcl/dynamic_estimation/imm.py +14 -14
  41. pytcl/dynamic_estimation/kalman/__init__.py +30 -0
  42. pytcl/dynamic_estimation/kalman/constrained.py +382 -0
  43. pytcl/dynamic_estimation/kalman/extended.py +8 -8
  44. pytcl/dynamic_estimation/kalman/h_infinity.py +613 -0
  45. pytcl/dynamic_estimation/kalman/square_root.py +60 -573
  46. pytcl/dynamic_estimation/kalman/sr_ukf.py +302 -0
  47. pytcl/dynamic_estimation/kalman/ud_filter.py +410 -0
  48. pytcl/dynamic_estimation/kalman/unscented.py +8 -6
  49. pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
  50. pytcl/dynamic_estimation/rbpf.py +589 -0
  51. pytcl/gravity/egm.py +13 -0
  52. pytcl/gravity/spherical_harmonics.py +98 -37
  53. pytcl/gravity/tides.py +6 -6
  54. pytcl/logging_config.py +328 -0
  55. pytcl/magnetism/__init__.py +7 -0
  56. pytcl/magnetism/emm.py +10 -3
  57. pytcl/magnetism/wmm.py +260 -23
  58. pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
  59. pytcl/mathematical_functions/geometry/geometry.py +5 -5
  60. pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
  61. pytcl/mathematical_functions/signal_processing/detection.py +24 -24
  62. pytcl/mathematical_functions/signal_processing/filters.py +14 -14
  63. pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
  64. pytcl/mathematical_functions/special_functions/bessel.py +15 -3
  65. pytcl/mathematical_functions/special_functions/debye.py +136 -26
  66. pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
  67. pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
  68. pytcl/mathematical_functions/special_functions/hypergeometric.py +81 -15
  69. pytcl/mathematical_functions/transforms/fourier.py +8 -8
  70. pytcl/mathematical_functions/transforms/stft.py +12 -12
  71. pytcl/mathematical_functions/transforms/wavelets.py +9 -9
  72. pytcl/navigation/geodesy.py +246 -160
  73. pytcl/navigation/great_circle.py +101 -19
  74. pytcl/plotting/coordinates.py +7 -7
  75. pytcl/plotting/tracks.py +2 -2
  76. pytcl/static_estimation/maximum_likelihood.py +16 -14
  77. pytcl/static_estimation/robust.py +5 -5
  78. pytcl/terrain/loaders.py +5 -5
  79. pytcl/trackers/hypothesis.py +1 -1
  80. pytcl/trackers/mht.py +9 -9
  81. pytcl/trackers/multi_target.py +1 -1
  82. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.7.5.dist-info}/LICENSE +0 -0
  83. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.7.5.dist-info}/WHEEL +0 -0
  84. {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.7.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,589 @@
1
+ """Rao-Blackwellized Particle Filter (RBPF).
2
+
3
+ The RBPF partitions the state into a nonlinear part (handled by particles) and
4
+ a linear part (handled by Kalman filters for each particle). This provides
5
+ better estimation quality than plain particle filters for systems with both
6
+ nonlinear and linear dynamics.
7
+
8
+ The algorithm:
9
+ 1. Maintain N particles, each with:
10
+ - Position in nonlinear state space (y)
11
+ - Kalman filter state (x, P) for linear subspace
12
+ - Weight w based on measurement likelihood
13
+ 2. For each time step:
14
+ - Predict: Propagate nonlinear particles, update KF for each
15
+ - Update: Compute measurement likelihood, adapt weights
16
+ - Resample: When effective sample size is low, draw new particles
17
+ - Merge: Combine nearby particles to reduce variance
18
+
19
+ References:
20
+ - Doucet et al., "On Sequential Monte Carlo Sampling with Adaptive Weights"
21
+ (Doucet & Tadic, 2003)
22
+ - Andrieu et al., "Particle Methods for Change Detection, System Identification"
23
+ (IEEE SPM, 2004)
24
+ """
25
+
26
+ from typing import Any, Callable, NamedTuple
27
+
28
+ import numpy as np
29
+ from numpy.typing import NDArray
30
+
31
+ from pytcl.dynamic_estimation.kalman.extended import ekf_predict, ekf_update
32
+
33
+
34
class RBPFParticle(NamedTuple):
    """Rao-Blackwellized particle with nonlinear and linear components.

    Parameters
    ----------
    y : NDArray
        Nonlinear state component (propagated by particle transition)
    x : NDArray
        Linear state component (estimated by Kalman filter for this particle)
    P : NDArray
        Covariance of linear state component
    w : float
        Particle weight (typically normalized to sum to 1)
    """

    y: NDArray[Any]
    x: NDArray[Any]
    P: NDArray[Any]
    w: float


class RBPFFilter:
    """Rao-Blackwellized Particle Filter.

    Combines particle filtering for nonlinear states with Kalman filtering
    for conditionally-linear states. For a system partitioned as:
    - y: nonlinear state (particles)
    - x: linear state given y (Kalman filter)

    Attributes
    ----------
    particles : list[RBPFParticle]
        Current particles with nonlinear/linear states and weights
    max_particles : int
        Maximum number of particles (default 100)
    resample_threshold : float
        Resample when N_eff < resample_threshold * N (default 0.5)
    merge_threshold : float
        Merge nearby particles when KL divergence < threshold (default 0.5)
    """

    def __init__(
        self,
        max_particles: int = 100,
        resample_threshold: float = 0.5,
        merge_threshold: float = 0.5,
    ):
        """Initialize RBPF.

        Parameters
        ----------
        max_particles : int
            Maximum number of particles to maintain
        resample_threshold : float
            Resample threshold as fraction of max particles
        merge_threshold : float
            KL divergence threshold for merging particles
        """
        self.particles: list[RBPFParticle] = []
        self.max_particles = max_particles
        self.resample_threshold = resample_threshold
        self.merge_threshold = merge_threshold

    def initialize(
        self,
        y0: NDArray[Any],
        x0: NDArray[Any],
        P0: NDArray[Any],
        num_particles: int = 100,
    ) -> None:
        """Initialize particles.

        Parameters
        ----------
        y0 : NDArray
            Initial nonlinear state (broadcasted to all particles)
        x0 : NDArray
            Initial linear state (broadcasted to all particles)
        P0 : NDArray
            Initial linear state covariance (same for all particles)
        num_particles : int
            Number of particles to initialize
        """
        self.particles = []
        weight = 1.0 / num_particles

        # Add small noise to particle y values to break ties between
        # otherwise-identical particles.
        ny = y0.shape[0]

        for _ in range(num_particles):
            # Nonlinear component: small perturbation around y0
            y = y0 + np.random.randn(ny) * 1e-6
            # Linear component: same for all particles (improved by update)
            self.particles.append(
                RBPFParticle(y=y, x=x0.copy(), P=P0.copy(), w=weight)
            )

    def predict(
        self,
        g: Callable[[NDArray[Any]], NDArray[Any]],
        G: NDArray[Any],
        Qy: NDArray[Any],
        f: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
        F: NDArray[Any],
        Qx: NDArray[Any],
    ) -> None:
        """Predict step: propagate particles and linear states.

        Parameters
        ----------
        g : callable
            Nonlinear state transition: y[k+1] = g(y[k])
        G : NDArray
            Jacobian of g with respect to y. NOTE(review): currently
            unused by this implementation; accepted for interface
            symmetry with the linear part.
        Qy : NDArray
            Process noise covariance for nonlinear state
        f : callable
            Linear transition: x[k+1] = f(x[k], y[k])
        F : NDArray
            Jacobian matrix dF/dx (linearized around y)
        Qx : NDArray
            Process noise covariance for linear state
        """
        new_particles = []

        for particle in self.particles:
            # Predict nonlinear component, then add process noise
            y_pred = g(particle.y)
            y_pred = y_pred + np.random.multivariate_normal(
                np.zeros(y_pred.shape[0]), Qy
            )

            # Bind y_pred as a default argument so the closure does not
            # late-bind the loop variable (loop-safe even if the callee
            # were to retain the callable past this iteration).
            def f_wrapper(
                x: NDArray[Any], _y: NDArray[Any] = y_pred
            ) -> NDArray[Any]:
                return f(x, _y)

            # Predict linear component using EKF
            pred = ekf_predict(particle.x, particle.P, f_wrapper, F, Qx)

            new_particles.append(
                RBPFParticle(y=y_pred, x=pred.x, P=pred.P, w=particle.w)
            )

        self.particles = new_particles

    def update(
        self,
        z: NDArray[Any],
        h: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
        H: NDArray[Any],
        R: NDArray[Any],
    ) -> None:
        """Update step: adapt particle weights based on measurement.

        Parameters
        ----------
        z : NDArray
            Measurement vector
        h : callable
            Measurement function: z = h(x, y)
        H : NDArray
            Jacobian matrix dH/dx (measurement sensitivity)
        R : NDArray
            Measurement noise covariance
        """
        weights = np.zeros(len(self.particles))
        new_particles = []

        for i, particle in enumerate(self.particles):
            # Bind the particle's y via a default argument (loop-safe;
            # avoids late-binding of the loop variable).
            def h_wrapper(
                x: NDArray[Any], _y: NDArray[Any] = particle.y
            ) -> NDArray[Any]:
                return h(x, _y)

            # Update linear component (Kalman update)
            upd = ekf_update(particle.x, particle.P, z, h_wrapper, H, R)

            # Unnormalized weight: prior weight times measurement likelihood
            weights[i] = particle.w * upd.likelihood

            new_particles.append(
                RBPFParticle(
                    y=particle.y,
                    x=upd.x,
                    P=upd.P,
                    w=particle.w,  # renormalized below
                )
            )

        # Normalize weights; fall back to uniform if all likelihoods vanish
        w_sum = np.sum(weights)
        if w_sum > 0:
            weights = weights / w_sum
        else:
            weights = np.ones(len(self.particles)) / len(self.particles)

        # Attach the normalized weights
        self.particles = [
            RBPFParticle(y=p.y, x=p.x, P=p.P, w=w)
            for p, w in zip(new_particles, weights)
        ]

        # Resample if needed, then merge if the population is too large
        self._resample_if_needed()
        self._merge_particles()

    def estimate(self) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]:
        """Estimate state as weighted mean and covariance.

        Returns
        -------
        y_est : NDArray
            Weighted mean of nonlinear components
        x_est : NDArray
            Weighted mean of linear components
        P_est : NDArray
            Weighted covariance (includes mixture and linear uncertainties)

        Raises
        ------
        ValueError
            If the filter holds no particles.
        """
        if not self.particles:
            raise ValueError("No particles to estimate")

        weights = np.array([p.w for p in self.particles])

        # Nonlinear state: weighted mean
        y_particles = np.array([p.y for p in self.particles])
        y_est = np.average(y_particles, axis=0, weights=weights)

        # Linear state: weighted mean
        x_particles = np.array([p.x for p in self.particles])
        x_est = np.average(x_particles, axis=0, weights=weights)

        # Law of total covariance:
        # E[(x - x_est)(x - x_est)^T] = E[Cov[x|y]] + Cov[E[x|y]]
        #                             = weighted_mean(P) + weighted_cov(x)
        n0, n1 = self.particles[0].P.shape[0], self.particles[0].P.shape[1]
        P_mean = np.zeros((n0, n1))  # weighted mean of per-particle covariances
        P_cov = np.zeros((n0, n1))  # weighted covariance of per-particle means
        for p in self.particles:
            P_mean += p.w * p.P
            dx = p.x - x_est
            P_cov += p.w * np.outer(dx, dx)

        return y_est, x_est, P_mean + P_cov

    def get_particles(self) -> list[RBPFParticle]:
        """Get current particles.

        Returns
        -------
        list[RBPFParticle]
            Shallow copy of the current particle list
        """
        return self.particles.copy()

    def _resample_if_needed(self) -> None:
        """Resample particles if effective sample size is too low.

        Uses systematic resampling to reduce variance.
        """
        weights = np.array([p.w for p in self.particles])

        # Effective sample size: 1 / sum(w_i^2)
        N_eff = 1.0 / np.sum(weights**2)

        threshold = self.resample_threshold * len(self.particles)

        if N_eff < threshold:
            self._systematic_resample()

    def _systematic_resample(self) -> None:
        """Perform systematic (low-variance) resampling."""
        weights = np.array([p.w for p in self.particles])
        n = len(self.particles)

        # Cumulative sum of weights
        cs = np.cumsum(weights)

        # Resample indices
        indices = []
        u = np.random.uniform(0, 1.0 / n)

        j = 0
        for _ in range(n):
            # Guard j < n - 1: floating-point round-off can leave cs[-1]
            # slightly below 1, which would otherwise push j past the
            # last index and raise IndexError.
            while j < n - 1 and u > cs[j]:
                j += 1
            indices.append(j)
            u += 1.0 / n

        # Create new particles with uniform weights; copy arrays so
        # duplicated particles don't alias each other's state.
        weight = 1.0 / n
        self.particles = [
            RBPFParticle(
                y=self.particles[idx].y.copy(),
                x=self.particles[idx].x.copy(),
                P=self.particles[idx].P.copy(),
                w=weight,
            )
            for idx in indices
        ]

    def _merge_particles(self) -> None:
        """Merge nearby particles to reduce variance.

        Repeatedly merges the pair of particles whose linear-state
        Gaussians are closest in KL divergence, until the population is
        within ``max_particles`` or no pair is closer than
        ``merge_threshold``.
        """
        if len(self.particles) <= 1:
            return

        # Only merge while over the particle budget
        max_iter = len(self.particles) - self.max_particles

        for _ in range(max_iter):
            if len(self.particles) <= self.max_particles:
                break

            best_div = np.inf
            best_i, best_j = 0, 1

            # Exhaustive closest-pair search (O(n^2) in particle count)
            for i in range(len(self.particles)):
                for j in range(i + 1, len(self.particles)):
                    div = self._kl_divergence(
                        self.particles[i].P,
                        self.particles[j].P,
                        self.particles[i].x,
                        self.particles[j].x,
                    )
                    if div < best_div:
                        best_div = div
                        best_i, best_j = i, j

            if best_div >= self.merge_threshold:
                break  # nothing close enough to merge

            # Merge particles best_i and best_j (moment-matched,
            # weight-preserving Gaussian merge)
            p_i = self.particles[best_i]
            p_j = self.particles[best_j]

            w_total = p_i.w + p_j.w
            w_i = p_i.w / w_total
            w_j = p_j.w / w_total

            # Merged nonlinear state
            y_merged = w_i * p_i.y + w_j * p_j.y

            # Merged linear state
            x_merged = w_i * p_i.x + w_j * p_j.x

            # Merged covariance: mixture covariance + spread of the means
            P_merged = (
                w_i * p_i.P
                + w_j * p_j.P
                + w_i * np.outer(p_i.x - x_merged, p_i.x - x_merged)
                + w_j * np.outer(p_j.x - x_merged, p_j.x - x_merged)
            )

            merged_particle = RBPFParticle(
                y=y_merged, x=x_merged, P=P_merged, w=w_total
            )

            # best_i < best_j by construction (inner loop starts at i+1),
            # so replace the earlier slot and pop the later one.
            self.particles[best_i] = merged_particle
            self.particles.pop(best_j)

        # Renormalize weights after merging
        w_sum = sum(p.w for p in self.particles)
        if w_sum > 0:
            self.particles = [
                RBPFParticle(y=p.y, x=p.x, P=p.P, w=p.w / w_sum)
                for p in self.particles
            ]

    @staticmethod
    def _kl_divergence(
        P1: NDArray[Any], P2: NDArray[Any], x1: NDArray[Any], x2: NDArray[Any]
    ) -> float:
        """Compute KL divergence between two Gaussians.

        KL(N(x1, P1) || N(x2, P2)) = 0.5 * [
            trace(P2^-1 @ P1) + (x2-x1)^T @ P2^-1 @ (x2-x1) - n + ln(|P2|/|P1|)
        ]

        Parameters
        ----------
        P1 : NDArray
            Covariance of first Gaussian
        P2 : NDArray
            Covariance of second Gaussian
        x1 : NDArray
            Mean of first Gaussian
        x2 : NDArray
            Mean of second Gaussian

        Returns
        -------
        float
            KL divergence (always >= 0); ``inf`` when P2 is singular.
        """
        try:
            P2_inv = np.linalg.inv(P2)
            n = P1.shape[0]

            # Trace term
            trace_term = np.trace(P2_inv @ P1)

            # Mean difference term
            dx = x2 - x1
            mean_term = dx @ P2_inv @ dx

            # Determinant term via slogdet (numerically safer than det)
            det_term = np.linalg.slogdet(P2)[1] - np.linalg.slogdet(P1)[1]

            kl = 0.5 * (trace_term + mean_term - n + det_term)
            return float(np.maximum(kl, 0.0))  # clamp tiny negative round-off
        except (np.linalg.LinAlgError, ValueError):
            return np.inf
467
+
468
+
469
+ # Convenience functions for functional interface
470
+
471
+
472
def rbpf_predict(
    particles: list[RBPFParticle],
    g: Callable[[NDArray[Any]], NDArray[Any]],
    G: NDArray[Any],
    Qy: NDArray[Any],
    f: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
    F: NDArray[Any],
    Qx: NDArray[Any],
) -> list[RBPFParticle]:
    """Predict step for RBPF particles (functional interface).

    Each particle's nonlinear component is propagated through ``g`` with
    additive Gaussian process noise, and its conditionally-linear
    component is propagated by an EKF prediction.

    Parameters
    ----------
    particles : list[RBPFParticle]
        Current particles
    g : callable
        Nonlinear state transition
    G : NDArray
        Jacobian of nonlinear transition
    Qy : NDArray
        Process noise covariance for nonlinear state
    f : callable
        Linear state transition
    F : NDArray
        Jacobian of linear transition
    Qx : NDArray
        Process noise covariance for linear state

    Returns
    -------
    list[RBPFParticle]
        Predicted particles
    """
    predicted: list[RBPFParticle] = []

    for p in particles:
        # Propagate the nonlinear component, then inject process noise
        y_next = g(p.y)
        y_next = y_next + np.random.multivariate_normal(
            np.zeros(y_next.shape[0]), Qy
        )

        # Freeze y_next via a default argument so the closure is loop-safe
        def _f(x: NDArray[Any], _y: NDArray[Any] = y_next) -> NDArray[Any]:
            return f(x, _y)

        # EKF prediction of the conditionally-linear component
        kf = ekf_predict(p.x, p.P, _f, F, Qx)

        predicted.append(RBPFParticle(y=y_next, x=kf.x, P=kf.P, w=p.w))

    return predicted
528
+
529
+
530
def rbpf_update(
    particles: list[RBPFParticle],
    z: NDArray[Any],
    h: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
    H: NDArray[Any],
    R: NDArray[Any],
) -> list[RBPFParticle]:
    """Update step for RBPF particles (functional interface).

    Runs a Kalman update of each particle's conditionally-linear state
    and re-weights particles by the resulting measurement likelihood.

    Parameters
    ----------
    particles : list[RBPFParticle]
        Predicted particles
    z : NDArray
        Measurement
    h : callable
        Measurement function
    H : NDArray
        Jacobian of measurement function
    R : NDArray
        Measurement noise covariance

    Returns
    -------
    list[RBPFParticle]
        Updated particles with adapted weights
    """
    count = len(particles)
    raw_weights = np.zeros(count)
    updated: list[RBPFParticle] = []

    for i, p in enumerate(particles):
        # Freeze this particle's y via a default argument (loop-safe)
        def _h(x: NDArray[Any], _y: NDArray[Any] = p.y) -> NDArray[Any]:
            return h(x, _y)

        # Kalman update of the linear component
        kf = ekf_update(p.x, p.P, z, _h, H, R)

        # Weight by measurement likelihood (normalized afterwards)
        raw_weights[i] = p.w * kf.likelihood

        updated.append(RBPFParticle(y=p.y, x=kf.x, P=kf.P, w=p.w))

    # Normalize; degrade gracefully to uniform when all likelihoods vanish
    total = np.sum(raw_weights)
    if total > 0:
        norm_weights = raw_weights / total
    else:
        norm_weights = np.ones(count) / count

    return [
        RBPFParticle(y=p.y, x=p.x, P=p.P, w=w)
        for p, w in zip(updated, norm_weights)
    ]
pytcl/gravity/egm.py CHANGED
@@ -21,6 +21,7 @@ References
21
21
  https://earth-info.nga.mil/
22
22
  """
23
23
 
24
+ import logging
24
25
  import os
25
26
  from functools import lru_cache
26
27
  from pathlib import Path
@@ -32,6 +33,9 @@ from numpy.typing import NDArray
32
33
  from .clenshaw import clenshaw_gravity, clenshaw_potential
33
34
  from .models import WGS84, normal_gravity_somigliana
34
35
 
36
+ # Module logger
37
+ _logger = logging.getLogger("pytcl.gravity.egm")
38
+
35
39
 
36
40
  class EGMCoefficients(NamedTuple):
37
41
  """Earth Gravitational Model coefficients.
@@ -317,6 +321,8 @@ def _load_coefficients_cached(
317
321
  data_dir = get_data_dir()
318
322
  filepath = data_dir / f"{model}.cof"
319
323
 
324
+ _logger.debug("Loading %s coefficients from %s", model, filepath)
325
+
320
326
  if not filepath.exists():
321
327
  raise FileNotFoundError(
322
328
  f"Coefficient file not found: {filepath}\n"
@@ -330,6 +336,13 @@ def _load_coefficients_cached(
330
336
  actual_n_max = n_max if n_max is not None else int(params["n_max_full"])
331
337
  C, S = parse_egm_file(filepath, actual_n_max)
332
338
 
339
+ _logger.info(
340
+ "Loaded %s coefficients: n_max=%d, array_size=%.1f MB",
341
+ model,
342
+ C.shape[0] - 1,
343
+ C.nbytes / 1024 / 1024 * 2, # Both C and S arrays
344
+ )
345
+
333
346
  return EGMCoefficients(
334
347
  C=C,
335
348
  S=S,