nrl-tracker 1.6.0__py3-none-any.whl → 1.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/METADATA +14 -10
  2. {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/RECORD +75 -68
  3. pytcl/__init__.py +2 -2
  4. pytcl/assignment_algorithms/__init__.py +28 -0
  5. pytcl/assignment_algorithms/gating.py +10 -10
  6. pytcl/assignment_algorithms/jpda.py +40 -40
  7. pytcl/assignment_algorithms/nd_assignment.py +379 -0
  8. pytcl/assignment_algorithms/network_flow.py +371 -0
  9. pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
  10. pytcl/astronomical/__init__.py +35 -0
  11. pytcl/astronomical/ephemerides.py +14 -11
  12. pytcl/astronomical/reference_frames.py +110 -4
  13. pytcl/astronomical/relativity.py +6 -5
  14. pytcl/astronomical/special_orbits.py +532 -0
  15. pytcl/atmosphere/__init__.py +11 -0
  16. pytcl/atmosphere/nrlmsise00.py +809 -0
  17. pytcl/clustering/dbscan.py +2 -2
  18. pytcl/clustering/gaussian_mixture.py +3 -3
  19. pytcl/clustering/hierarchical.py +15 -15
  20. pytcl/clustering/kmeans.py +4 -4
  21. pytcl/containers/base.py +3 -3
  22. pytcl/containers/cluster_set.py +12 -2
  23. pytcl/containers/covertree.py +5 -3
  24. pytcl/containers/rtree.py +1 -1
  25. pytcl/containers/vptree.py +4 -2
  26. pytcl/coordinate_systems/conversions/geodetic.py +272 -5
  27. pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
  28. pytcl/coordinate_systems/projections/projections.py +2 -2
  29. pytcl/coordinate_systems/rotations/rotations.py +10 -6
  30. pytcl/core/validation.py +3 -3
  31. pytcl/dynamic_estimation/__init__.py +26 -0
  32. pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
  33. pytcl/dynamic_estimation/imm.py +14 -14
  34. pytcl/dynamic_estimation/kalman/__init__.py +12 -0
  35. pytcl/dynamic_estimation/kalman/constrained.py +382 -0
  36. pytcl/dynamic_estimation/kalman/extended.py +8 -8
  37. pytcl/dynamic_estimation/kalman/h_infinity.py +2 -2
  38. pytcl/dynamic_estimation/kalman/square_root.py +8 -2
  39. pytcl/dynamic_estimation/kalman/sr_ukf.py +3 -3
  40. pytcl/dynamic_estimation/kalman/ud_filter.py +11 -5
  41. pytcl/dynamic_estimation/kalman/unscented.py +8 -6
  42. pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
  43. pytcl/dynamic_estimation/rbpf.py +589 -0
  44. pytcl/gravity/spherical_harmonics.py +3 -3
  45. pytcl/gravity/tides.py +6 -6
  46. pytcl/logging_config.py +3 -3
  47. pytcl/magnetism/emm.py +10 -3
  48. pytcl/magnetism/wmm.py +4 -4
  49. pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
  50. pytcl/mathematical_functions/geometry/geometry.py +5 -5
  51. pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
  52. pytcl/mathematical_functions/signal_processing/detection.py +24 -24
  53. pytcl/mathematical_functions/signal_processing/filters.py +14 -14
  54. pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
  55. pytcl/mathematical_functions/special_functions/bessel.py +15 -3
  56. pytcl/mathematical_functions/special_functions/debye.py +5 -1
  57. pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
  58. pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
  59. pytcl/mathematical_functions/special_functions/hypergeometric.py +6 -4
  60. pytcl/mathematical_functions/transforms/fourier.py +8 -8
  61. pytcl/mathematical_functions/transforms/stft.py +12 -12
  62. pytcl/mathematical_functions/transforms/wavelets.py +9 -9
  63. pytcl/navigation/geodesy.py +3 -3
  64. pytcl/navigation/great_circle.py +5 -5
  65. pytcl/plotting/coordinates.py +7 -7
  66. pytcl/plotting/tracks.py +2 -2
  67. pytcl/static_estimation/maximum_likelihood.py +16 -14
  68. pytcl/static_estimation/robust.py +5 -5
  69. pytcl/terrain/loaders.py +5 -5
  70. pytcl/trackers/hypothesis.py +1 -1
  71. pytcl/trackers/mht.py +9 -9
  72. pytcl/trackers/multi_target.py +1 -1
  73. {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/LICENSE +0 -0
  74. {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/WHEEL +0 -0
  75. {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,589 @@
1
+ """Rao-Blackwellized Particle Filter (RBPF).
2
+
3
+ The RBPF partitions the state into a nonlinear part (handled by particles) and
4
+ a linear part (handled by Kalman filters for each particle). This provides
5
+ better estimation quality than plain particle filters for systems with both
6
+ nonlinear and linear dynamics.
7
+
8
+ The algorithm:
9
+ 1. Maintain N particles, each with:
10
+ - Position in nonlinear state space (y)
11
+ - Kalman filter state (x, P) for linear subspace
12
+ - Weight w based on measurement likelihood
13
+ 2. For each time step:
14
+ - Predict: Propagate nonlinear particles, update KF for each
15
+ - Update: Compute measurement likelihood, adapt weights
16
+ - Resample: When effective sample size is low, draw new particles
17
+ - Merge: Combine nearby particles to reduce variance
18
+
19
+ References:
20
+ - Doucet et al., "On Sequential Monte Carlo Sampling with Adaptive Weights"
21
+ (Doucet & Tadic, 2003)
22
+ - Andrieu et al., "Particle Methods for Change Detection, System Identification"
23
+ (IEEE SPM, 2004)
24
+ """
25
+
26
+ from typing import Any, Callable, NamedTuple
27
+
28
+ import numpy as np
29
+ from numpy.typing import NDArray
30
+
31
+ from pytcl.dynamic_estimation.kalman.extended import ekf_predict, ekf_update
32
+
33
+
34
class RBPFParticle(NamedTuple):
    """A single Rao-Blackwellized particle.

    Each particle carries a sample of the nonlinear state together with a
    full Gaussian (mean/covariance) over the conditionally-linear state,
    plus an importance weight.

    Parameters
    ----------
    y : NDArray
        Sample of the nonlinear state component (propagated by the
        particle transition model).
    x : NDArray
        Mean of the conditionally-linear state, maintained by a per-particle
        Kalman filter.
    P : NDArray
        Covariance of the conditionally-linear state.
    w : float
        Importance weight; the filter normalizes weights to sum to 1.
    """

    y: NDArray[Any]
    x: NDArray[Any]
    P: NDArray[Any]
    w: float
53
+
54
+
55
+ class RBPFFilter:
56
+ """Rao-Blackwellized Particle Filter.
57
+
58
+ Combines particle filtering for nonlinear states with Kalman filtering
59
+ for conditionally-linear states. For a system partitioned as:
60
+ - y: nonlinear state (particles)
61
+ - x: linear state given y (Kalman filter)
62
+
63
+ Attributes
64
+ ----------
65
+ particles : list[RBPFParticle]
66
+ Current particles with nonlinear/linear states and weights
67
+ max_particles : int
68
+ Maximum number of particles (default 100)
69
+ resample_threshold : float
70
+ Resample when N_eff < resample_threshold * N (default 0.5)
71
+ merge_threshold : float
72
+ Merge nearby particles when KL divergence < threshold (default 0.5)
73
+ """
74
+
75
+ def __init__(
76
+ self,
77
+ max_particles: int = 100,
78
+ resample_threshold: float = 0.5,
79
+ merge_threshold: float = 0.5,
80
+ ):
81
+ """Initialize RBPF.
82
+
83
+ Parameters
84
+ ----------
85
+ max_particles : int
86
+ Maximum number of particles to maintain
87
+ resample_threshold : float
88
+ Resample threshold as fraction of max particles
89
+ merge_threshold : float
90
+ KL divergence threshold for merging particles
91
+ """
92
+ self.particles: list[RBPFParticle] = []
93
+ self.max_particles = max_particles
94
+ self.resample_threshold = resample_threshold
95
+ self.merge_threshold = merge_threshold
96
+
97
+ def initialize(
98
+ self,
99
+ y0: NDArray[Any],
100
+ x0: NDArray[Any],
101
+ P0: NDArray[Any],
102
+ num_particles: int = 100,
103
+ ) -> None:
104
+ """Initialize particles.
105
+
106
+ Parameters
107
+ ----------
108
+ y0 : NDArray
109
+ Initial nonlinear state (broadcasted to all particles)
110
+ x0 : NDArray
111
+ Initial linear state (broadcasted to all particles)
112
+ P0 : NDArray
113
+ Initial linear state covariance (same for all particles)
114
+ num_particles : int
115
+ Number of particles to initialize
116
+ """
117
+ self.particles = []
118
+ weight = 1.0 / num_particles
119
+
120
+ # Add small noise to particle y values to break ties
121
+ ny = y0.shape[0]
122
+
123
+ for i in range(num_particles):
124
+ # Nonlinear component: small perturbation around y0
125
+ y = y0 + np.random.randn(ny) * 1e-6
126
+ # Linear component: same for all particles (improved by update)
127
+ x = x0.copy()
128
+ P = P0.copy()
129
+
130
+ particle = RBPFParticle(y=y, x=x, P=P, w=weight)
131
+ self.particles.append(particle)
132
+
133
+ def predict(
134
+ self,
135
+ g: Callable[[NDArray[Any]], NDArray[Any]],
136
+ G: NDArray[Any],
137
+ Qy: NDArray[Any],
138
+ f: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
139
+ F: NDArray[Any],
140
+ Qx: NDArray[Any],
141
+ ) -> None:
142
+ """Predict step: propagate particles and linear states.
143
+
144
+ Parameters
145
+ ----------
146
+ g : callable
147
+ Nonlinear state transition: y[k+1] = g(y[k])
148
+ G : NDArray
149
+ Jacobian of g with respect to y (for covariance propagation)
150
+ Qy : NDArray
151
+ Process noise covariance for nonlinear state
152
+ f : callable
153
+ Linear transition: x[k+1] = f(x[k], y[k])
154
+ F : NDArray
155
+ Jacobian matrix dF/dx (linearized around y)
156
+ Qx : NDArray
157
+ Process noise covariance for linear state
158
+ """
159
+ new_particles = []
160
+
161
+ for particle in self.particles:
162
+ # Predict nonlinear component
163
+ y_pred = g(particle.y)
164
+ # Add process noise
165
+ y_pred = y_pred + np.random.multivariate_normal(
166
+ np.zeros(y_pred.shape[0]), Qy
167
+ )
168
+
169
+ # Create wrapper for linear dynamics with current y_pred
170
+ def f_wrapper(x: NDArray[Any]) -> NDArray[Any]:
171
+ return f(x, y_pred)
172
+
173
+ # Predict linear component using EKF
174
+ pred = ekf_predict(particle.x, particle.P, f_wrapper, F, Qx)
175
+
176
+ new_particle = RBPFParticle(
177
+ y=y_pred,
178
+ x=pred.x,
179
+ P=pred.P,
180
+ w=particle.w,
181
+ )
182
+ new_particles.append(new_particle)
183
+
184
+ self.particles = new_particles
185
+
186
+ def update(
187
+ self,
188
+ z: NDArray[Any],
189
+ h: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
190
+ H: NDArray[Any],
191
+ R: NDArray[Any],
192
+ ) -> None:
193
+ """Update step: adapt particle weights based on measurement.
194
+
195
+ Parameters
196
+ ----------
197
+ z : NDArray
198
+ Measurement vector
199
+ h : callable
200
+ Measurement function: z = h(x, y)
201
+ H : NDArray
202
+ Jacobian matrix dH/dx (measurement sensitivity)
203
+ R : NDArray
204
+ Measurement noise covariance
205
+ """
206
+ weights = np.zeros(len(self.particles))
207
+ new_particles = []
208
+
209
+ for i, particle in enumerate(self.particles):
210
+ # Create wrapper for measurement function with current y
211
+ def h_wrapper(x: NDArray[Any]) -> NDArray[Any]:
212
+ return h(x, particle.y)
213
+
214
+ # Update linear component (Kalman update)
215
+ upd = ekf_update(particle.x, particle.P, z, h_wrapper, H, R)
216
+
217
+ # Weight: measurement likelihood from Kalman update
218
+ likelihood = upd.likelihood
219
+
220
+ # Unnormalized weight
221
+ weights[i] = particle.w * likelihood
222
+
223
+ new_particle = RBPFParticle(
224
+ y=particle.y,
225
+ x=upd.x,
226
+ P=upd.P,
227
+ w=particle.w, # Will renormalize below
228
+ )
229
+ new_particles.append(new_particle)
230
+
231
+ # Normalize weights
232
+ w_sum = np.sum(weights)
233
+ if w_sum > 0:
234
+ weights = weights / w_sum
235
+ else:
236
+ # Uniform weights if all likelihoods are zero
237
+ weights = np.ones(len(self.particles)) / len(self.particles)
238
+
239
+ # Update particles with new weights
240
+ self.particles = [
241
+ RBPFParticle(y=p.y, x=p.x, P=p.P, w=w)
242
+ for p, w in zip(new_particles, weights)
243
+ ]
244
+
245
+ # Resample if needed
246
+ self._resample_if_needed()
247
+
248
+ # Merge if too many particles
249
+ self._merge_particles()
250
+
251
+ def estimate(self) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]:
252
+ """Estimate state as weighted mean and covariance.
253
+
254
+ Returns
255
+ -------
256
+ y_est : NDArray
257
+ Weighted mean of nonlinear components
258
+ x_est : NDArray
259
+ Weighted mean of linear components
260
+ P_est : NDArray
261
+ Weighted covariance (includes mixture and linear uncertainties)
262
+ """
263
+ if not self.particles:
264
+ raise ValueError("No particles to estimate")
265
+
266
+ weights = np.array([p.w for p in self.particles])
267
+
268
+ # Nonlinear state: weighted mean
269
+ y_particles = np.array([p.y for p in self.particles])
270
+ y_est = np.average(y_particles, axis=0, weights=weights)
271
+
272
+ # Linear state: weighted mean and covariance
273
+ x_particles = np.array([p.x for p in self.particles])
274
+ x_est = np.average(x_particles, axis=0, weights=weights)
275
+
276
+ # Covariance: E[(x - x_est)(x - x_est)^T] = E[Cov[x|y]] + Cov[E[x|y]]
277
+ # = weighted_mean(P) + weighted_cov(x)
278
+
279
+ # Weighted mean of covariances
280
+ P_mean = np.zeros((self.particles[0].P.shape[0], self.particles[0].P.shape[1]))
281
+ for p in self.particles:
282
+ P_mean += p.w * p.P
283
+
284
+ # Weighted covariance of means
285
+ P_cov = np.zeros((self.particles[0].P.shape[0], self.particles[0].P.shape[1]))
286
+ for p in self.particles:
287
+ dx = p.x - x_est
288
+ P_cov += p.w * np.outer(dx, dx)
289
+
290
+ P_est = P_mean + P_cov
291
+
292
+ return y_est, x_est, P_est
293
+
294
+ def get_particles(self) -> list[RBPFParticle]:
295
+ """Get current particles.
296
+
297
+ Returns
298
+ -------
299
+ list[RBPFParticle]
300
+ Current particle list
301
+ """
302
+ return self.particles.copy()
303
+
304
+ def _resample_if_needed(self) -> None:
305
+ """Resample particles if effective sample size is too low.
306
+
307
+ Uses systematic resampling to reduce variance.
308
+ """
309
+ weights = np.array([p.w for p in self.particles])
310
+
311
+ # Effective sample size
312
+ N_eff = 1.0 / np.sum(weights**2)
313
+
314
+ threshold = self.resample_threshold * len(self.particles)
315
+
316
+ if N_eff < threshold:
317
+ self._systematic_resample()
318
+
319
+ def _systematic_resample(self) -> None:
320
+ """Perform systematic resampling."""
321
+ weights = np.array([p.w for p in self.particles])
322
+ n = len(self.particles)
323
+
324
+ # Cumulative sum
325
+ cs = np.cumsum(weights)
326
+
327
+ # Resample indices
328
+ indices = []
329
+ u = np.random.uniform(0, 1.0 / n)
330
+
331
+ j = 0
332
+ for i in range(n):
333
+ while u > cs[j]:
334
+ j += 1
335
+ indices.append(j)
336
+ u += 1.0 / n
337
+
338
+ # Create new particles with uniform weights
339
+ new_particles = []
340
+ weight = 1.0 / n
341
+
342
+ for idx in indices:
343
+ p = self.particles[idx]
344
+ new_particles.append(
345
+ RBPFParticle(y=p.y.copy(), x=p.x.copy(), P=p.P.copy(), w=weight)
346
+ )
347
+
348
+ self.particles = new_particles
349
+
350
+ def _merge_particles(self) -> None:
351
+ """Merge nearby particles to reduce variance."""
352
+ if len(self.particles) <= 1:
353
+ return
354
+
355
+ # Find closest pair by KL divergence
356
+ max_iter = len(self.particles) - self.max_particles
357
+
358
+ for _ in range(max_iter):
359
+ if len(self.particles) <= self.max_particles:
360
+ break
361
+
362
+ best_div = np.inf
363
+ best_i, best_j = 0, 1
364
+
365
+ # Find closest pair
366
+ for i in range(len(self.particles)):
367
+ for j in range(i + 1, len(self.particles)):
368
+ div = self._kl_divergence(
369
+ self.particles[i].P,
370
+ self.particles[j].P,
371
+ self.particles[i].x,
372
+ self.particles[j].x,
373
+ )
374
+ if div < best_div:
375
+ best_div = div
376
+ best_i, best_j = i, j
377
+
378
+ if best_div < self.merge_threshold:
379
+ # Merge particles i and j
380
+ p_i = self.particles[best_i]
381
+ p_j = self.particles[best_j]
382
+
383
+ # Weighted merge
384
+ w_total = p_i.w + p_j.w
385
+ w_i = p_i.w / w_total
386
+ w_j = p_j.w / w_total
387
+
388
+ # Merged nonlinear state
389
+ y_merged = w_i * p_i.y + w_j * p_j.y
390
+
391
+ # Merged linear state and covariance
392
+ x_merged = w_i * p_i.x + w_j * p_j.x
393
+
394
+ # Merged covariance
395
+ P_merged = (
396
+ w_i * p_i.P
397
+ + w_j * p_j.P
398
+ + w_i * np.outer(p_i.x - x_merged, p_i.x - x_merged)
399
+ + w_j * np.outer(p_j.x - x_merged, p_j.x - x_merged)
400
+ )
401
+
402
+ merged_particle = RBPFParticle(
403
+ y=y_merged, x=x_merged, P=P_merged, w=w_total
404
+ )
405
+
406
+ # Replace particles
407
+ if best_i < best_j:
408
+ self.particles[best_i] = merged_particle
409
+ self.particles.pop(best_j)
410
+ else:
411
+ self.particles[best_j] = merged_particle
412
+ self.particles.pop(best_i)
413
+ else:
414
+ break
415
+
416
+ # Renormalize weights
417
+ w_sum = sum(p.w for p in self.particles)
418
+ if w_sum > 0:
419
+ self.particles = [
420
+ RBPFParticle(y=p.y, x=p.x, P=p.P, w=p.w / w_sum) for p in self.particles
421
+ ]
422
+
423
+ @staticmethod
424
+ def _kl_divergence(
425
+ P1: NDArray[Any], P2: NDArray[Any], x1: NDArray[Any], x2: NDArray[Any]
426
+ ) -> float:
427
+ """Compute KL divergence between two Gaussians.
428
+
429
+ KL(N(x1, P1) || N(x2, P2)) = 0.5 * [
430
+ trace(P2^-1 @ P1) + (x2-x1)^T @ P2^-1 @ (x2-x1) - n + ln(|P2|/|P1|)
431
+ ]
432
+
433
+ Parameters
434
+ ----------
435
+ P1 : NDArray
436
+ Covariance of first Gaussian
437
+ P2 : NDArray
438
+ Covariance of second Gaussian
439
+ x1 : NDArray
440
+ Mean of first Gaussian
441
+ x2 : NDArray
442
+ Mean of second Gaussian
443
+
444
+ Returns
445
+ -------
446
+ float
447
+ KL divergence (always >= 0)
448
+ """
449
+ try:
450
+ P2_inv = np.linalg.inv(P2)
451
+ n = P1.shape[0]
452
+
453
+ # Trace term
454
+ trace_term = np.trace(P2_inv @ P1)
455
+
456
+ # Mean difference term
457
+ dx = x2 - x1
458
+ mean_term = dx @ P2_inv @ dx
459
+
460
+ # Determinant term
461
+ det_term = np.linalg.slogdet(P2)[1] - np.linalg.slogdet(P1)[1]
462
+
463
+ kl = 0.5 * (trace_term + mean_term - n + det_term)
464
+ return float(np.maximum(kl, 0.0)) # Ensure non-negative
465
+ except (np.linalg.LinAlgError, ValueError):
466
+ return np.inf
467
+
468
+
469
+ # Convenience functions for functional interface
470
+
471
+
472
def rbpf_predict(
    particles: list[RBPFParticle],
    g: Callable[[NDArray[Any]], NDArray[Any]],
    G: NDArray[Any],
    Qy: NDArray[Any],
    f: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
    F: NDArray[Any],
    Qx: NDArray[Any],
) -> list[RBPFParticle]:
    """Functional predict step for a list of RBPF particles.

    Propagates each particle's nonlinear state through ``g`` with additive
    Gaussian process noise, then runs an EKF prediction of the linear state
    conditioned on the new nonlinear sample. Weights are carried unchanged.

    Parameters
    ----------
    particles : list[RBPFParticle]
        Current particles
    g : callable
        Nonlinear state transition
    G : NDArray
        Jacobian of nonlinear transition
    Qy : NDArray
        Process noise covariance for nonlinear state
    f : callable
        Linear state transition
    F : NDArray
        Jacobian of linear transition
    Qx : NDArray
        Process noise covariance for linear state

    Returns
    -------
    list[RBPFParticle]
        Predicted particles
    """
    predicted: list[RBPFParticle] = []

    for p in particles:
        # Nonlinear component: deterministic transition plus process noise.
        y_next = g(p.y)
        y_next = y_next + np.random.multivariate_normal(
            np.zeros(y_next.shape[0]), Qy
        )

        # Bind the new nonlinear sample as a default argument so the wrapper
        # is independent of the loop variable.
        def propagate(x: NDArray[Any], _y: NDArray[Any] = y_next) -> NDArray[Any]:
            return f(x, _y)

        # Linear component: EKF prediction conditioned on y_next.
        kf = ekf_predict(p.x, p.P, propagate, F, Qx)

        predicted.append(RBPFParticle(y=y_next, x=kf.x, P=kf.P, w=p.w))

    return predicted
528
+
529
+
530
def rbpf_update(
    particles: list[RBPFParticle],
    z: NDArray[Any],
    h: Callable[[NDArray[Any], NDArray[Any]], NDArray[Any]],
    H: NDArray[Any],
    R: NDArray[Any],
) -> list[RBPFParticle]:
    """Functional update step for a list of RBPF particles.

    Runs an EKF measurement update of each particle's linear state
    (conditioned on that particle's nonlinear sample) and reweights the
    particles by the resulting measurement likelihoods.

    Parameters
    ----------
    particles : list[RBPFParticle]
        Predicted particles
    z : NDArray
        Measurement
    h : callable
        Measurement function
    H : NDArray
        Jacobian of measurement function
    R : NDArray
        Measurement noise covariance

    Returns
    -------
    list[RBPFParticle]
        Updated particles with adapted weights
    """
    n = len(particles)
    raw_weights = np.zeros(n)
    updated: list[RBPFParticle] = []

    for i, p in enumerate(particles):
        # Bind this particle's nonlinear sample as a default argument so the
        # wrapper is independent of the loop variable.
        def measure(x: NDArray[Any], _y: NDArray[Any] = p.y) -> NDArray[Any]:
            return h(x, _y)

        # Kalman update of the linear component.
        res = ekf_update(p.x, p.P, z, measure, H, R)

        # Unnormalized weight: prior weight times measurement likelihood.
        raw_weights[i] = p.w * res.likelihood

        updated.append(RBPFParticle(y=p.y, x=res.x, P=res.P, w=p.w))

    # Normalize; degenerate all-zero likelihoods fall back to uniform weights.
    total = np.sum(raw_weights)
    if total > 0:
        norm_weights = raw_weights / total
    else:
        norm_weights = np.ones(n) / n

    return [
        RBPFParticle(y=p.y, x=p.x, P=p.P, w=w) for p, w in zip(updated, norm_weights)
    ]
@@ -13,7 +13,7 @@ References
13
13
 
14
14
  import logging
15
15
  from functools import lru_cache
16
- from typing import Optional, Tuple
16
+ from typing import Any, Optional, Tuple
17
17
 
18
18
  import numpy as np
19
19
  from numpy.typing import NDArray
@@ -37,7 +37,7 @@ def _associated_legendre_cached(
37
37
  m_max: int,
38
38
  x_quantized: float,
39
39
  normalized: bool,
40
- ) -> tuple:
40
+ ) -> tuple[tuple[np.ndarray[Any, Any], ...], ...]:
41
41
  """Cached Legendre polynomial computation (internal).
42
42
 
43
43
  Returns tuple of tuples for hashability.
@@ -543,7 +543,7 @@ def clear_legendre_cache() -> None:
543
543
  _logger.debug("Legendre polynomial cache cleared")
544
544
 
545
545
 
546
- def get_legendre_cache_info():
546
+ def get_legendre_cache_info() -> Any:
547
547
  """Get cache statistics for Legendre polynomials.
548
548
 
549
549
  Returns
pytcl/gravity/tides.py CHANGED
@@ -77,9 +77,9 @@ class OceanTideLoading(NamedTuple):
77
77
  Names of tidal constituents.
78
78
  """
79
79
 
80
- amplitude: NDArray
81
- phase: NDArray
82
- constituents: Tuple[str, ...]
80
+ amplitude: NDArray[np.floating]
81
+ phase: NDArray[np.floating]
82
+ constituents: tuple[str, ...]
83
83
 
84
84
 
85
85
  # Love and Shida numbers for degree 2 (IERS 2010)
@@ -593,9 +593,9 @@ def solid_earth_tide_gravity(
593
593
 
594
594
  def ocean_tide_loading_displacement(
595
595
  mjd: float,
596
- amplitude: NDArray,
597
- phase: NDArray,
598
- constituents: Tuple[str, ...] = ("M2", "S2", "N2", "K2", "K1", "O1", "P1", "Q1"),
596
+ amplitude: NDArray[np.floating],
597
+ phase: NDArray[np.floating],
598
+ constituents: tuple[str, ...] = ("M2", "S2", "N2", "K2", "K1", "O1", "P1", "Q1"),
599
599
  ) -> TidalDisplacement:
600
600
  """
601
601
  Compute ocean tide loading displacement.
pytcl/logging_config.py CHANGED
@@ -26,7 +26,7 @@ import functools
26
26
  import logging
27
27
  import time
28
28
  from contextlib import contextmanager
29
- from typing import Any, Callable, Optional, TypeVar
29
+ from typing import Any, Callable, Generator, Optional, TypeVar
30
30
 
31
31
  # Type variable for decorated functions
32
32
  F = TypeVar("F", bound=Callable[..., Any])
@@ -201,7 +201,7 @@ def TimingContext(
201
201
  logger: Optional[logging.Logger] = None,
202
202
  name: str = "operation",
203
203
  level: int = logging.DEBUG,
204
- ):
204
+ ) -> Generator[None, None, None]:
205
205
  """
206
206
  Context manager for timing code blocks.
207
207
 
@@ -268,7 +268,7 @@ class PerformanceTracker:
268
268
  self.max_ms = 0.0
269
269
 
270
270
  @contextmanager
271
- def track(self):
271
+ def track(self) -> Generator[None, None, None]:
272
272
  """Track a single operation."""
273
273
  start = time.perf_counter()
274
274
  try:
pytcl/magnetism/emm.py CHANGED
@@ -24,7 +24,7 @@ References
24
24
  import os
25
25
  from functools import lru_cache
26
26
  from pathlib import Path
27
- from typing import Dict, NamedTuple, Optional, Tuple
27
+ from typing import Any, NamedTuple, Optional, Tuple
28
28
 
29
29
  import numpy as np
30
30
  from numpy.typing import NDArray
@@ -32,7 +32,7 @@ from numpy.typing import NDArray
32
32
  from .wmm import MagneticResult
33
33
 
34
34
  # Model parameters
35
- EMM_PARAMETERS: Dict[str, Dict] = {
35
+ EMM_PARAMETERS: dict[str, dict[str, Any]] = {
36
36
  "EMM2017": {
37
37
  "n_max": 790,
38
38
  "epoch": 2017.0,
@@ -119,7 +119,14 @@ def _ensure_data_dir() -> Path:
119
119
  def parse_emm_file(
120
120
  filepath: Path,
121
121
  n_max: Optional[int] = None,
122
- ) -> Tuple[NDArray, NDArray, NDArray, NDArray, float, int]:
122
+ ) -> tuple[
123
+ NDArray[np.floating],
124
+ NDArray[np.floating],
125
+ NDArray[np.floating],
126
+ NDArray[np.floating],
127
+ float,
128
+ int,
129
+ ]:
123
130
  """Parse an EMM/WMMHR coefficient file.
124
131
 
125
132
  The file format is similar to WMM but with more coefficients: