nrl-tracker 1.9.1__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/METADATA +49 -4
  2. {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/RECORD +68 -60
  3. pytcl/__init__.py +2 -2
  4. pytcl/assignment_algorithms/gating.py +18 -0
  5. pytcl/assignment_algorithms/jpda.py +56 -0
  6. pytcl/assignment_algorithms/nd_assignment.py +65 -0
  7. pytcl/assignment_algorithms/network_flow.py +40 -0
  8. pytcl/astronomical/ephemerides.py +18 -0
  9. pytcl/astronomical/orbital_mechanics.py +131 -0
  10. pytcl/atmosphere/ionosphere.py +44 -0
  11. pytcl/atmosphere/models.py +29 -0
  12. pytcl/clustering/dbscan.py +9 -0
  13. pytcl/clustering/gaussian_mixture.py +20 -0
  14. pytcl/clustering/hierarchical.py +29 -0
  15. pytcl/clustering/kmeans.py +9 -0
  16. pytcl/coordinate_systems/conversions/geodetic.py +46 -0
  17. pytcl/coordinate_systems/conversions/spherical.py +35 -0
  18. pytcl/coordinate_systems/rotations/rotations.py +147 -0
  19. pytcl/core/__init__.py +16 -0
  20. pytcl/core/maturity.py +346 -0
  21. pytcl/core/optional_deps.py +20 -0
  22. pytcl/dynamic_estimation/gaussian_sum_filter.py +55 -0
  23. pytcl/dynamic_estimation/imm.py +29 -0
  24. pytcl/dynamic_estimation/information_filter.py +64 -0
  25. pytcl/dynamic_estimation/kalman/extended.py +56 -0
  26. pytcl/dynamic_estimation/kalman/linear.py +69 -0
  27. pytcl/dynamic_estimation/kalman/unscented.py +81 -0
  28. pytcl/dynamic_estimation/particle_filters/bootstrap.py +146 -0
  29. pytcl/dynamic_estimation/rbpf.py +51 -0
  30. pytcl/dynamic_estimation/smoothers.py +58 -0
  31. pytcl/dynamic_models/continuous_time/dynamics.py +104 -0
  32. pytcl/dynamic_models/discrete_time/coordinated_turn.py +6 -0
  33. pytcl/dynamic_models/discrete_time/singer.py +12 -0
  34. pytcl/dynamic_models/process_noise/coordinated_turn.py +46 -0
  35. pytcl/dynamic_models/process_noise/polynomial.py +6 -0
  36. pytcl/dynamic_models/process_noise/singer.py +52 -0
  37. pytcl/gpu/__init__.py +153 -0
  38. pytcl/gpu/ekf.py +425 -0
  39. pytcl/gpu/kalman.py +543 -0
  40. pytcl/gpu/matrix_utils.py +486 -0
  41. pytcl/gpu/particle_filter.py +568 -0
  42. pytcl/gpu/ukf.py +476 -0
  43. pytcl/gpu/utils.py +582 -0
  44. pytcl/gravity/clenshaw.py +60 -0
  45. pytcl/gravity/egm.py +47 -0
  46. pytcl/gravity/models.py +34 -0
  47. pytcl/gravity/spherical_harmonics.py +73 -0
  48. pytcl/gravity/tides.py +34 -0
  49. pytcl/mathematical_functions/numerical_integration/quadrature.py +85 -0
  50. pytcl/mathematical_functions/special_functions/bessel.py +55 -0
  51. pytcl/mathematical_functions/special_functions/elliptic.py +42 -0
  52. pytcl/mathematical_functions/special_functions/error_functions.py +49 -0
  53. pytcl/mathematical_functions/special_functions/gamma_functions.py +43 -0
  54. pytcl/mathematical_functions/special_functions/lambert_w.py +5 -0
  55. pytcl/mathematical_functions/special_functions/marcum_q.py +16 -0
  56. pytcl/navigation/geodesy.py +101 -2
  57. pytcl/navigation/great_circle.py +71 -0
  58. pytcl/navigation/rhumb.py +74 -0
  59. pytcl/performance_evaluation/estimation_metrics.py +70 -0
  60. pytcl/performance_evaluation/track_metrics.py +30 -0
  61. pytcl/static_estimation/maximum_likelihood.py +54 -0
  62. pytcl/static_estimation/robust.py +57 -0
  63. pytcl/terrain/dem.py +69 -0
  64. pytcl/terrain/visibility.py +65 -0
  65. pytcl/trackers/hypothesis.py +65 -0
  66. {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/LICENSE +0 -0
  67. {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/WHEEL +0 -0
  68. {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/top_level.txt +0 -0
pytcl/gpu/particle_filter.py (new file)
@@ -0,0 +1,568 @@
"""
GPU-accelerated Particle Filter using CuPy.

This module provides GPU-accelerated implementations of particle filtering
algorithms for highly nonlinear and non-Gaussian state estimation.

Key Features
------------
- GPU-accelerated resampling (systematic, stratified, multinomial)
- Parallel weight computation
- Batch processing of multiple particle filters
- Efficient memory management

Performance
-----------
The GPU implementation achieves an 8-15x speedup over the CPU for:
- Large particle counts (N > 1000)
- Parallel processing of multiple targets

Examples
--------
>>> from pytcl.gpu.particle_filter import CuPyParticleFilter
>>> import numpy as np
>>>
>>> def dynamics(particles, t):
...     # Propagate particles through nonlinear dynamics
...     return particles + np.random.randn(*particles.shape) * 0.1
>>>
>>> def likelihood(particles, measurement):
...     # Compute the likelihood of the measurement for each particle
...     diff = particles[:, 0] - measurement
...     return np.exp(-0.5 * diff**2)
>>>
>>> pf = CuPyParticleFilter(n_particles=10000, state_dim=2)
>>> pf.predict(dynamics, 0.0)
>>> log_lik = pf.update(1.0, likelihood)
"""

from typing import Callable, NamedTuple, Tuple

import numpy as np
from numpy.typing import ArrayLike, NDArray

from pytcl.core.optional_deps import import_optional, requires
from pytcl.gpu.utils import ensure_gpu_array, to_cpu


class ParticleFilterState(NamedTuple):
    """State of a particle filter.

    Attributes
    ----------
    particles : ndarray
        Particle states, shape (n_particles, state_dim).
    weights : ndarray
        Normalized particle weights, shape (n_particles,).
    ess : float
        Effective sample size.
    """

    particles: NDArray[np.floating]
    weights: NDArray[np.floating]
    ess: float


@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_effective_sample_size(weights: ArrayLike) -> float:
    """
    Compute effective sample size on GPU.

    ESS = 1 / sum(w_i^2)

    Parameters
    ----------
    weights : array_like
        Normalized particle weights.

    Returns
    -------
    ess : float
        Effective sample size.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")
    w = ensure_gpu_array(weights, dtype=cp.float64)
    ess = 1.0 / float(cp.sum(w**2))
    return ess
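
# A quick CPU sanity check of the ESS formula, with NumPy standing in for
# CuPy (an illustrative sketch, not part of the package API): uniform
# weights give ESS = N, a single dominant weight gives ESS = 1.
#
#   import numpy as np
#   w = np.full(1000, 1.0 / 1000)        # uniform -> ESS ~ 1000
#   print(1.0 / np.sum(w**2))
#   w = np.zeros(1000); w[0] = 1.0       # degenerate -> ESS = 1
#   print(1.0 / np.sum(w**2))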


@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_resample_systematic(weights: ArrayLike) -> NDArray[np.intp]:
    """
    GPU-accelerated systematic resampling.

    Systematic resampling uses a single random number to select particles,
    resulting in low variance and O(N) complexity.

    Parameters
    ----------
    weights : array_like
        Normalized particle weights, shape (n_particles,).

    Returns
    -------
    indices : ndarray
        Resampled particle indices, shape (n_particles,).

    Examples
    --------
    >>> import numpy as np
    >>> from pytcl.gpu.particle_filter import gpu_resample_systematic
    >>> weights = np.array([0.1, 0.3, 0.4, 0.2])
    >>> indices = gpu_resample_systematic(weights)
    >>> # Particles 1 and 2 will be selected more often
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    w = ensure_gpu_array(weights, dtype=cp.float64)
    n = len(w)

    # Cumulative sum of weights
    cumsum = cp.cumsum(w)

    # Systematic sampling positions
    u0 = cp.random.uniform(0, 1.0 / n)
    positions = u0 + cp.arange(n, dtype=cp.float64) / n

    # Find indices using searchsorted
    indices = cp.searchsorted(cumsum, positions)

    # Clip to valid range
    indices = cp.clip(indices, 0, n - 1)

    return indices
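
# Sketch of the inverse-CDF step above on the docstring's example weights,
# with NumPy standing in for CuPy and u0 fixed for illustration:
#
#   import numpy as np
#   w = np.array([0.1, 0.3, 0.4, 0.2])
#   cdf = np.cumsum(w)                    # [0.1, 0.4, 0.8, 1.0]
#   pos = 0.125 + np.arange(4) / 4        # [0.125, 0.375, 0.625, 0.875]
#   print(np.searchsorted(cdf, pos))      # [1 1 2 3]
#
# For this draw, particle 0 (weight 0.1) is dropped and particle 1 is
# duplicated; on average each particle i is selected about n * w_i times.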


@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_resample_multinomial(weights: ArrayLike) -> NDArray[np.intp]:
    """
    GPU-accelerated multinomial resampling.

    Multinomial resampling samples particles independently according
    to their weights.

    Parameters
    ----------
    weights : array_like
        Normalized particle weights, shape (n_particles,).

    Returns
    -------
    indices : ndarray
        Resampled particle indices, shape (n_particles,).

    Notes
    -----
    Multinomial resampling has higher variance than systematic resampling
    but is simpler and can be more efficient on GPU for certain sizes.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    w = ensure_gpu_array(weights, dtype=cp.float64)
    n = len(w)

    # Cumulative sum
    cumsum = cp.cumsum(w)

    # Generate random samples
    u = cp.random.uniform(0, 1, n)

    # Find indices
    indices = cp.searchsorted(cumsum, u)
    indices = cp.clip(indices, 0, n - 1)

    return indices


@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_resample_stratified(weights: ArrayLike) -> NDArray[np.intp]:
    """
    GPU-accelerated stratified resampling.

    Stratified resampling divides the CDF into N equal strata and samples
    one particle from each stratum.

    Parameters
    ----------
    weights : array_like
        Normalized particle weights, shape (n_particles,).

    Returns
    -------
    indices : ndarray
        Resampled particle indices, shape (n_particles,).
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    w = ensure_gpu_array(weights, dtype=cp.float64)
    n = len(w)

    # Cumulative sum
    cumsum = cp.cumsum(w)

    # Stratified sampling: one random number per stratum
    u = (cp.arange(n, dtype=cp.float64) + cp.random.uniform(0, 1, n)) / n

    # Find indices
    indices = cp.searchsorted(cumsum, u)
    indices = cp.clip(indices, 0, n - 1)

    return indices
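
# Summary of how the three schemes probe the same cumulative sum (the CDF
# inversion via searchsorted is identical; only the probe positions differ):
#
#   systematic : u0 + i/n       single draw u0 ~ U(0, 1/n) shared by all i
#   stratified : (i + u_i)/n    one independent draw per stratum
#   multinomial: u_i            n fully independent draws u_i ~ U(0, 1)
#
# Resampling variance typically decreases from multinomial to stratified to
# systematic, matching the Notes above.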


@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_normalize_weights(log_weights: ArrayLike) -> Tuple[NDArray, float]:
    """
    Normalize log weights to proper weights on GPU.

    Uses the log-sum-exp trick for numerical stability.

    Parameters
    ----------
    log_weights : array_like
        Unnormalized log weights, shape (n_particles,).

    Returns
    -------
    weights : ndarray
        Normalized weights, shape (n_particles,).
    log_likelihood : float
        Log of the normalization constant.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    log_w = ensure_gpu_array(log_weights, dtype=cp.float64)

    # Log-sum-exp for numerical stability
    max_log_w = cp.max(log_w)
    log_sum = max_log_w + cp.log(cp.sum(cp.exp(log_w - max_log_w)))

    # Normalized weights
    weights = cp.exp(log_w - log_sum)

    return weights, float(log_sum)
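
# Why the maximum is subtracted first (a worked sketch in NumPy, which
# follows the same arithmetic as the CuPy path): exponentiating large
# negative log weights directly underflows to 0/0, while shifting by the
# maximum keeps at least one term equal to exp(0) = 1.
#
#   import numpy as np
#   log_w = np.array([-1000.0, -1001.0])
#   w = np.exp(log_w)                     # both underflow to 0.0
#   print(w / w.sum())                    # [nan nan]
#   m = log_w.max()
#   log_sum = m + np.log(np.sum(np.exp(log_w - m)))
#   print(np.exp(log_w - log_sum))        # [0.73105858 0.26894142]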


class CuPyParticleFilter:
    """
    GPU-accelerated Bootstrap Particle Filter.

    This class implements the Sequential Importance Resampling (SIR)
    particle filter with GPU acceleration.

    Parameters
    ----------
    n_particles : int
        Number of particles.
    state_dim : int
        Dimension of state vector.
    resample_method : str
        Resampling method: 'systematic', 'multinomial', or 'stratified'.
    resample_threshold : float
        ESS threshold for resampling (as fraction of n_particles).

    Attributes
    ----------
    particles : cupy.ndarray
        Current particle states, shape (n_particles, state_dim).
    weights : cupy.ndarray
        Current particle weights, shape (n_particles,).

    Examples
    --------
    >>> import numpy as np
    >>> from pytcl.gpu.particle_filter import CuPyParticleFilter
    >>>
    >>> # Initialize filter
    >>> pf = CuPyParticleFilter(n_particles=10000, state_dim=4)
    >>> pf.initialize(initial_state, initial_cov)
    >>>
    >>> # Run filter
    >>> for measurement in measurements:
    ...     pf.predict(dynamics_fn)
    ...     pf.update(measurement, likelihood_fn)
    ...     state_estimate = pf.get_estimate()
    """

    @requires("cupy", extra="gpu", feature="GPU particle filter")
    def __init__(
        self,
        n_particles: int,
        state_dim: int,
        resample_method: str = "systematic",
        resample_threshold: float = 0.5,
    ):
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        self.n_particles = n_particles
        self.state_dim = state_dim
        self.resample_threshold = resample_threshold

        # Select resampling function
        if resample_method == "systematic":
            self._resample_fn = gpu_resample_systematic
        elif resample_method == "multinomial":
            self._resample_fn = gpu_resample_multinomial
        elif resample_method == "stratified":
            self._resample_fn = gpu_resample_stratified
        else:
            raise ValueError(f"Unknown resample method: {resample_method}")

        # Initialize particles and weights
        self.particles = cp.zeros((n_particles, state_dim), dtype=cp.float64)
        self.weights = cp.ones(n_particles, dtype=cp.float64) / n_particles

    def initialize(
        self,
        mean: ArrayLike,
        cov: ArrayLike,
    ) -> None:
        """
        Initialize particles from Gaussian distribution.

        Parameters
        ----------
        mean : array_like
            Mean state, shape (state_dim,).
        cov : array_like
            Covariance matrix, shape (state_dim, state_dim).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        mean = np.asarray(mean).flatten()
        cov = np.asarray(cov)

        # Sample from multivariate normal on CPU (CuPy lacks this)
        samples = np.random.multivariate_normal(mean, cov, self.n_particles)
        self.particles = ensure_gpu_array(samples, dtype=cp.float64)
        self.weights = cp.ones(self.n_particles, dtype=cp.float64) / self.n_particles
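
        # A GPU-only alternative (a sketch, not what this package does):
        # draw standard normals on the device and color them with a
        # Cholesky factor, since x = mean + L z has covariance L L^T = cov:
        #
        #   L = cp.linalg.cholesky(ensure_gpu_array(cov, dtype=cp.float64))
        #   z = cp.random.standard_normal((self.n_particles, self.state_dim))
        #   self.particles = ensure_gpu_array(mean, dtype=cp.float64) + z @ L.T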

    def initialize_uniform(
        self,
        low: ArrayLike,
        high: ArrayLike,
    ) -> None:
        """
        Initialize particles from uniform distribution.

        Parameters
        ----------
        low : array_like
            Lower bounds, shape (state_dim,).
        high : array_like
            Upper bounds, shape (state_dim,).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        low = ensure_gpu_array(low, dtype=cp.float64)
        high = ensure_gpu_array(high, dtype=cp.float64)

        # Sample uniformly
        u = cp.random.uniform(0, 1, (self.n_particles, self.state_dim))
        self.particles = low + u * (high - low)
        self.weights = cp.ones(self.n_particles, dtype=cp.float64) / self.n_particles

    def predict(
        self,
        dynamics_fn: Callable[[NDArray], NDArray],
        *args,
        **kwargs,
    ) -> None:
        """
        Propagate particles through dynamics.

        Parameters
        ----------
        dynamics_fn : callable
            Function that takes particles (N, state_dim) and returns
            propagated particles (N, state_dim).
        *args, **kwargs
            Additional arguments passed to dynamics_fn.

        Notes
        -----
        The dynamics function receives CuPy arrays if GPU is available.
        It should return arrays of the same type.
        """
        # Apply dynamics (may be on CPU or GPU depending on function)
        self.particles = dynamics_fn(self.particles, *args, **kwargs)

    def update(
        self,
        measurement: ArrayLike,
        likelihood_fn: Callable[[NDArray, NDArray], NDArray],
    ) -> float:
        """
        Update weights based on measurement likelihood.

        Parameters
        ----------
        measurement : array_like
            Measurement vector.
        likelihood_fn : callable
            Function that computes likelihood for each particle.
            Takes (particles, measurement) and returns likelihoods,
            shape (n_particles,).

        Returns
        -------
        log_likelihood : float
            Log of the marginal likelihood (normalization constant).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        z = ensure_gpu_array(measurement, dtype=cp.float64)

        # Compute likelihoods
        likelihoods = likelihood_fn(self.particles, z)
        likelihoods = ensure_gpu_array(likelihoods, dtype=cp.float64)

        # Update weights (the 1e-300 floor guards against log(0))
        log_weights = cp.log(self.weights) + cp.log(likelihoods + 1e-300)

        # Normalize
        self.weights, log_likelihood = gpu_normalize_weights(log_weights)

        # Resample if ESS drops below threshold
        ess = gpu_effective_sample_size(self.weights)
        if ess < self.resample_threshold * self.n_particles:
            self._resample()

        return log_likelihood
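
    # Because update() returns the per-step normalization constant in log
    # form, the log marginal likelihood of a whole measurement sequence can
    # be accumulated across a run (usage sketch; dynamics_fn, likelihood_fn,
    # and measurements are placeholders):
    #
    #   total_log_lik = 0.0
    #   for z in measurements:
    #       pf.predict(dynamics_fn)
    #       total_log_lik += pf.update(z, likelihood_fn)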

    def _resample(self) -> None:
        """Resample particles and reset weights to uniform."""
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        indices = self._resample_fn(self.weights)
        self.particles = self.particles[indices]
        self.weights = cp.ones(self.n_particles, dtype=cp.float64) / self.n_particles

    def get_estimate(self) -> NDArray[np.floating]:
        """
        Compute weighted mean estimate.

        Returns
        -------
        estimate : ndarray
            Weighted mean state, shape (state_dim,).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")
        estimate = cp.sum(self.particles * self.weights[:, None], axis=0)
        return estimate

    def get_covariance(self) -> NDArray[np.floating]:
        """
        Compute weighted covariance estimate.

        Returns
        -------
        cov : ndarray
            Weighted covariance, shape (state_dim, state_dim).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        mean = self.get_estimate()
        diff = self.particles - mean
        # cov = sum_n w_n * outer(diff_n, diff_n)
        cov = cp.einsum("n,ni,nj->ij", self.weights, diff, diff)
        return cov
468
+
469
+ def get_ess(self) -> float:
470
+ """Get current effective sample size."""
471
+ return gpu_effective_sample_size(self.weights)
472
+
473
+ def get_state(self) -> ParticleFilterState:
474
+ """
475
+ Get current filter state.
476
+
477
+ Returns
478
+ -------
479
+ state : ParticleFilterState
480
+ Named tuple with particles, weights, and ESS.
481
+ """
482
+ return ParticleFilterState(
483
+ particles=self.particles,
484
+ weights=self.weights,
485
+ ess=self.get_ess(),
486
+ )
487
+
488
+ def get_particles_cpu(self) -> NDArray[np.floating]:
489
+ """Get particles on CPU."""
490
+ return to_cpu(self.particles)
491
+
492
+ def get_weights_cpu(self) -> NDArray[np.floating]:
493
+ """Get weights on CPU."""
494
+ return to_cpu(self.weights)


@requires("cupy", extra="gpu", feature="GPU particle filter")
def batch_particle_filter_update(
    particles: ArrayLike,
    weights: ArrayLike,
    measurements: ArrayLike,
    likelihood_fn: Callable[[NDArray, NDArray], NDArray],
) -> Tuple[NDArray, NDArray, NDArray]:
    """
    Batch update for multiple particle filters.

    Parameters
    ----------
    particles : array_like
        Particle states, shape (n_filters, n_particles, state_dim).
    weights : array_like
        Particle weights, shape (n_filters, n_particles).
    measurements : array_like
        Measurements, shape (n_filters, meas_dim).
    likelihood_fn : callable
        Function that computes likelihood for each particle.

    Returns
    -------
    weights_updated : ndarray
        Updated weights, shape (n_filters, n_particles).
    log_likelihoods : ndarray
        Log marginal likelihood for each filter, shape (n_filters,).
    ess : ndarray
        Effective sample size for each filter, shape (n_filters,).
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    particles_gpu = ensure_gpu_array(particles, dtype=cp.float64)
    weights_gpu = ensure_gpu_array(weights, dtype=cp.float64)
    measurements_gpu = ensure_gpu_array(measurements, dtype=cp.float64)

    n_filters = particles_gpu.shape[0]

    weights_updated = cp.zeros_like(weights_gpu)
    log_likelihoods = cp.zeros(n_filters, dtype=cp.float64)
    ess = cp.zeros(n_filters, dtype=cp.float64)

    for i in range(n_filters):
        # Compute likelihoods
        likelihoods = likelihood_fn(particles_gpu[i], measurements_gpu[i])
        likelihoods = ensure_gpu_array(likelihoods, dtype=cp.float64)

        # Update weights
        log_weights = cp.log(weights_gpu[i]) + cp.log(likelihoods + 1e-300)

        # Normalize via log-sum-exp
        max_log_w = cp.max(log_weights)
        log_sum = max_log_w + cp.log(cp.sum(cp.exp(log_weights - max_log_w)))
        weights_updated[i] = cp.exp(log_weights - log_sum)
        log_likelihoods[i] = log_sum

        # ESS
        ess[i] = 1.0 / cp.sum(weights_updated[i] ** 2)

    return weights_updated, log_likelihoods, ess
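
# Usage sketch for the batch update (shapes only; gaussian_likelihood is a
# placeholder for any per-particle likelihood function):
#
#   import numpy as np
#   particles = np.random.randn(8, 10000, 4)    # 8 filters, 10k particles
#   weights = np.full((8, 10000), 1.0 / 10000)  # normalized, uniform
#   measurements = np.random.randn(8, 2)
#   w, log_lik, ess = batch_particle_filter_update(
#       particles, weights, measurements, gaussian_likelihood
#   )
#   # w: (8, 10000), log_lik: (8,), ess: (8,)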


__all__ = [
    "ParticleFilterState",
    "gpu_effective_sample_size",
    "gpu_resample_systematic",
    "gpu_resample_multinomial",
    "gpu_resample_stratified",
    "gpu_normalize_weights",
    "CuPyParticleFilter",
    "batch_particle_filter_update",
]