nrl-tracker 0.21.4__py3-none-any.whl → 1.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95) hide show
  1. {nrl_tracker-0.21.4.dist-info → nrl_tracker-1.7.5.dist-info}/METADATA +57 -10
  2. nrl_tracker-1.7.5.dist-info/RECORD +165 -0
  3. pytcl/__init__.py +4 -3
  4. pytcl/assignment_algorithms/__init__.py +28 -0
  5. pytcl/assignment_algorithms/data_association.py +2 -7
  6. pytcl/assignment_algorithms/gating.py +10 -10
  7. pytcl/assignment_algorithms/jpda.py +40 -40
  8. pytcl/assignment_algorithms/nd_assignment.py +379 -0
  9. pytcl/assignment_algorithms/network_flow.py +371 -0
  10. pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
  11. pytcl/astronomical/__init__.py +162 -8
  12. pytcl/astronomical/ephemerides.py +533 -0
  13. pytcl/astronomical/reference_frames.py +865 -56
  14. pytcl/astronomical/relativity.py +473 -0
  15. pytcl/astronomical/sgp4.py +710 -0
  16. pytcl/astronomical/special_orbits.py +532 -0
  17. pytcl/astronomical/tle.py +558 -0
  18. pytcl/atmosphere/__init__.py +45 -3
  19. pytcl/atmosphere/ionosphere.py +512 -0
  20. pytcl/atmosphere/nrlmsise00.py +809 -0
  21. pytcl/clustering/dbscan.py +2 -2
  22. pytcl/clustering/gaussian_mixture.py +3 -3
  23. pytcl/clustering/hierarchical.py +15 -15
  24. pytcl/clustering/kmeans.py +4 -4
  25. pytcl/containers/__init__.py +28 -21
  26. pytcl/containers/base.py +219 -0
  27. pytcl/containers/cluster_set.py +2 -1
  28. pytcl/containers/covertree.py +26 -29
  29. pytcl/containers/kd_tree.py +94 -29
  30. pytcl/containers/measurement_set.py +1 -9
  31. pytcl/containers/rtree.py +200 -1
  32. pytcl/containers/vptree.py +21 -28
  33. pytcl/coordinate_systems/conversions/geodetic.py +272 -5
  34. pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
  35. pytcl/coordinate_systems/projections/__init__.py +4 -2
  36. pytcl/coordinate_systems/projections/projections.py +2 -2
  37. pytcl/coordinate_systems/rotations/rotations.py +10 -6
  38. pytcl/core/__init__.py +18 -0
  39. pytcl/core/validation.py +333 -2
  40. pytcl/dynamic_estimation/__init__.py +26 -0
  41. pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
  42. pytcl/dynamic_estimation/imm.py +15 -18
  43. pytcl/dynamic_estimation/kalman/__init__.py +30 -0
  44. pytcl/dynamic_estimation/kalman/constrained.py +382 -0
  45. pytcl/dynamic_estimation/kalman/extended.py +9 -12
  46. pytcl/dynamic_estimation/kalman/h_infinity.py +613 -0
  47. pytcl/dynamic_estimation/kalman/square_root.py +60 -573
  48. pytcl/dynamic_estimation/kalman/sr_ukf.py +302 -0
  49. pytcl/dynamic_estimation/kalman/ud_filter.py +410 -0
  50. pytcl/dynamic_estimation/kalman/unscented.py +9 -10
  51. pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
  52. pytcl/dynamic_estimation/rbpf.py +589 -0
  53. pytcl/dynamic_estimation/smoothers.py +1 -5
  54. pytcl/dynamic_models/discrete_time/__init__.py +1 -5
  55. pytcl/dynamic_models/process_noise/__init__.py +1 -5
  56. pytcl/gravity/egm.py +13 -0
  57. pytcl/gravity/spherical_harmonics.py +98 -37
  58. pytcl/gravity/tides.py +6 -6
  59. pytcl/logging_config.py +328 -0
  60. pytcl/magnetism/__init__.py +10 -14
  61. pytcl/magnetism/emm.py +10 -3
  62. pytcl/magnetism/wmm.py +260 -23
  63. pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
  64. pytcl/mathematical_functions/geometry/geometry.py +5 -5
  65. pytcl/mathematical_functions/interpolation/__init__.py +2 -2
  66. pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
  67. pytcl/mathematical_functions/signal_processing/detection.py +24 -24
  68. pytcl/mathematical_functions/signal_processing/filters.py +14 -14
  69. pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
  70. pytcl/mathematical_functions/special_functions/__init__.py +2 -2
  71. pytcl/mathematical_functions/special_functions/bessel.py +15 -3
  72. pytcl/mathematical_functions/special_functions/debye.py +136 -26
  73. pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
  74. pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
  75. pytcl/mathematical_functions/special_functions/hypergeometric.py +81 -15
  76. pytcl/mathematical_functions/transforms/fourier.py +8 -8
  77. pytcl/mathematical_functions/transforms/stft.py +12 -12
  78. pytcl/mathematical_functions/transforms/wavelets.py +9 -9
  79. pytcl/navigation/__init__.py +14 -10
  80. pytcl/navigation/geodesy.py +246 -160
  81. pytcl/navigation/great_circle.py +101 -19
  82. pytcl/navigation/ins.py +1 -5
  83. pytcl/plotting/coordinates.py +7 -7
  84. pytcl/plotting/tracks.py +2 -2
  85. pytcl/static_estimation/maximum_likelihood.py +16 -14
  86. pytcl/static_estimation/robust.py +5 -5
  87. pytcl/terrain/loaders.py +5 -5
  88. pytcl/trackers/__init__.py +3 -14
  89. pytcl/trackers/hypothesis.py +1 -1
  90. pytcl/trackers/mht.py +9 -9
  91. pytcl/trackers/multi_target.py +2 -5
  92. nrl_tracker-0.21.4.dist-info/RECORD +0 -148
  93. {nrl_tracker-0.21.4.dist-info → nrl_tracker-1.7.5.dist-info}/LICENSE +0 -0
  94. {nrl_tracker-0.21.4.dist-info → nrl_tracker-1.7.5.dist-info}/WHEEL +0 -0
  95. {nrl_tracker-0.21.4.dist-info → nrl_tracker-1.7.5.dist-info}/top_level.txt +0 -0
@@ -11,11 +11,69 @@ References
11
11
  .. [2] O. Montenbruck and E. Gill, "Satellite Orbits," Springer, 2000.
12
12
  """
13
13
 
14
- from typing import Optional, Tuple
14
+ import logging
15
+ from functools import lru_cache
16
+ from typing import Any, Optional, Tuple
15
17
 
16
18
  import numpy as np
17
19
  from numpy.typing import NDArray
18
20
 
21
# Module logger
_logger = logging.getLogger("pytcl.gravity.spherical_harmonics")

# Cache configuration for Legendre polynomials.
_LEGENDRE_CACHE_DECIMALS = 8  # Precision for x quantization (keys rounded to ~1e-8)
_LEGENDRE_CACHE_MAXSIZE = 64  # Max cached (n_max, m_max, x, normalized) combinations
27
+
28
+
29
def _quantize_x(x: float) -> float:
    """Round ``x`` to the cache precision so nearby values share one key."""
    return round(x, ndigits=_LEGENDRE_CACHE_DECIMALS)
32
+
33
+
34
@lru_cache(maxsize=_LEGENDRE_CACHE_MAXSIZE)
def _associated_legendre_cached(
    n_max: int,
    m_max: int,
    x_quantized: float,
    normalized: bool,
) -> tuple[tuple[np.ndarray[Any, Any], ...], ...]:
    """Cached Legendre polynomial computation (internal).

    Fills a table P[n, m] of associated Legendre values at ``x_quantized``
    for degrees n <= n_max and orders m <= m_max using three standard
    recursions (sectoral, first off-diagonal, then general n-recursion).
    The ``normalized`` branch applies the normalization coefficients used
    throughout this module (appears to be full/geodetic normalization —
    confirm against the public wrapper's docstring).

    Returns tuple of tuples for hashability (so lru_cache results are
    immutable and a fresh ndarray can be built per caller).
    """
    # P[n, m] = P_n^m(x); entries with m > n remain zero by construction.
    P = np.zeros((n_max + 1, m_max + 1))
    # u = sqrt(1 - x^2) = sin(theta) for x = cos(theta); the caller has
    # already validated |x| <= 1, so the sqrt argument is non-negative.
    u = np.sqrt(1 - x_quantized * x_quantized)

    # Seed value: P_0^0 = 1.
    P[0, 0] = 1.0

    # Sectoral recursion: P_m^m from P_{m-1}^{m-1}.
    for m in range(1, m_max + 1):
        if normalized:
            P[m, m] = u * np.sqrt((2 * m + 1) / (2 * m)) * P[m - 1, m - 1]
        else:
            P[m, m] = (2 * m - 1) * u * P[m - 1, m - 1]

    # First off-diagonal: P_{m+1}^m from P_m^m (only if m+1 is a valid degree).
    for m in range(m_max):
        if m + 1 <= n_max:
            if normalized:
                P[m + 1, m] = x_quantized * np.sqrt(2 * m + 3) * P[m, m]
            else:
                P[m + 1, m] = x_quantized * (2 * m + 1) * P[m, m]

    # General recursion: P_n^m from P_{n-1}^m and P_{n-2}^m. Since n starts
    # at m + 2, n > m always holds, so the divisors below are never zero.
    for m in range(m_max + 1):
        for n in range(m + 2, n_max + 1):
            if normalized:
                a_nm = np.sqrt((4 * n * n - 1) / (n * n - m * m))
                b_nm = np.sqrt(((n - 1) ** 2 - m * m) / (4 * (n - 1) ** 2 - 1))
                P[n, m] = a_nm * (x_quantized * P[n - 1, m] - b_nm * P[n - 2, m])
            else:
                P[n, m] = (
                    (2 * n - 1) * x_quantized * P[n - 1, m] - (n + m - 1) * P[n - 2, m]
                ) / (n - m)

    # Convert to tuple of tuples for hashability
    return tuple(tuple(row) for row in P)
76
+
19
77
 
20
78
  def associated_legendre(
21
79
  n_max: int,
@@ -53,6 +111,9 @@ def associated_legendre(
53
111
 
54
112
  \\int_{-1}^{1} [\\bar{P}_n^m(x)]^2 dx = \\frac{2}{2n+1}
55
113
 
114
+ Results are cached for repeated queries with the same parameters.
115
+ Cache key quantizes x to 8 decimal places (~1e-8 precision).
116
+
56
117
  Examples
57
118
  --------
58
119
  >>> P = associated_legendre(2, 2, 0.5)
@@ -63,42 +124,10 @@ def associated_legendre(
63
124
  if not -1 <= x <= 1:
64
125
  raise ValueError("x must be in [-1, 1]")
65
126
 
66
- P = np.zeros((n_max + 1, m_max + 1))
67
-
68
- # Compute sqrt(1 - x^2) = sin(theta) for colatitude
69
- u = np.sqrt(1 - x * x)
70
-
71
- # Seed values
72
- P[0, 0] = 1.0
73
-
74
- # Sectoral recursion: P_m^m from P_{m-1}^{m-1}
75
- for m in range(1, m_max + 1):
76
- if normalized:
77
- P[m, m] = u * np.sqrt((2 * m + 1) / (2 * m)) * P[m - 1, m - 1]
78
- else:
79
- P[m, m] = (2 * m - 1) * u * P[m - 1, m - 1]
80
-
81
- # Compute P_{m+1}^m from P_m^m
82
- for m in range(m_max):
83
- if m + 1 <= n_max:
84
- if normalized:
85
- P[m + 1, m] = x * np.sqrt(2 * m + 3) * P[m, m]
86
- else:
87
- P[m + 1, m] = x * (2 * m + 1) * P[m, m]
88
-
89
- # General recursion: P_n^m from P_{n-1}^m and P_{n-2}^m
90
- for m in range(m_max + 1):
91
- for n in range(m + 2, n_max + 1):
92
- if normalized:
93
- a_nm = np.sqrt((4 * n * n - 1) / (n * n - m * m))
94
- b_nm = np.sqrt(((n - 1) ** 2 - m * m) / (4 * (n - 1) ** 2 - 1))
95
- P[n, m] = a_nm * (x * P[n - 1, m] - b_nm * P[n - 2, m])
96
- else:
97
- P[n, m] = (
98
- (2 * n - 1) * x * P[n - 1, m] - (n + m - 1) * P[n - 2, m]
99
- ) / (n - m)
100
-
101
- return P
127
+ # Use cached computation
128
+ x_q = _quantize_x(x)
129
+ cached = _associated_legendre_cached(n_max, m_max, x_q, normalized)
130
+ return np.array(cached)
102
131
 
103
132
 
104
133
  def associated_legendre_derivative(
@@ -230,6 +259,14 @@ def spherical_harmonic_sum(
230
259
  if n_max is None:
231
260
  n_max = C.shape[0] - 1
232
261
 
262
+ _logger.debug(
263
+ "spherical_harmonic_sum: lat=%.4f, lon=%.4f, r=%.1f, n_max=%d",
264
+ lat,
265
+ lon,
266
+ r,
267
+ n_max,
268
+ )
269
+
233
270
  # Colatitude for Legendre polynomials
234
271
  colat = np.pi / 2 - lat
235
272
  cos_colat = np.cos(colat)
@@ -495,6 +532,28 @@ def associated_legendre_scaled(
495
532
  return P_scaled, scale_exp
496
533
 
497
534
 
535
def clear_legendre_cache() -> None:
    """Drop all memoized associated Legendre polynomial results.

    Frees the lru_cache backing ``associated_legendre``. Useful when
    memory is tight, or after a batch of queries whose colatitudes will
    not be revisited.
    """
    _associated_legendre_cached.cache_clear()
    _logger.debug("Legendre polynomial cache cleared")
544
+
545
+
546
def get_legendre_cache_info() -> Any:
    """Return cache statistics for the Legendre polynomial memoizer.

    Returns
    -------
    CacheInfo
        functools ``CacheInfo`` named tuple: hits, misses, maxsize,
        currsize.
    """
    return _associated_legendre_cached.cache_info()
555
+
556
+
498
557
  __all__ = [
499
558
  "associated_legendre",
500
559
  "associated_legendre_derivative",
@@ -502,4 +561,6 @@ __all__ = [
502
561
  "gravity_acceleration",
503
562
  "legendre_scaling_factors",
504
563
  "associated_legendre_scaled",
564
+ "clear_legendre_cache",
565
+ "get_legendre_cache_info",
505
566
  ]
pytcl/gravity/tides.py CHANGED
@@ -77,9 +77,9 @@ class OceanTideLoading(NamedTuple):
77
77
  Names of tidal constituents.
78
78
  """
79
79
 
80
- amplitude: NDArray
81
- phase: NDArray
82
- constituents: Tuple[str, ...]
80
+ amplitude: NDArray[np.floating]
81
+ phase: NDArray[np.floating]
82
+ constituents: tuple[str, ...]
83
83
 
84
84
 
85
85
  # Love and Shida numbers for degree 2 (IERS 2010)
@@ -593,9 +593,9 @@ def solid_earth_tide_gravity(
593
593
 
594
594
  def ocean_tide_loading_displacement(
595
595
  mjd: float,
596
- amplitude: NDArray,
597
- phase: NDArray,
598
- constituents: Tuple[str, ...] = ("M2", "S2", "N2", "K2", "K1", "O1", "P1", "Q1"),
596
+ amplitude: NDArray[np.floating],
597
+ phase: NDArray[np.floating],
598
+ constituents: tuple[str, ...] = ("M2", "S2", "N2", "K2", "K1", "O1", "P1", "Q1"),
599
599
  ) -> TidalDisplacement:
600
600
  """
601
601
  Compute ocean tide loading displacement.
@@ -0,0 +1,328 @@
1
+ """
2
+ Hierarchical logging configuration for pyTCL.
3
+
4
+ Provides:
5
+ - Hierarchical loggers (pytcl.estimation, pytcl.assignment, etc.)
6
+ - Performance instrumentation decorators
7
+ - Context managers for timing critical sections
8
+ - Configurable output formats and levels
9
+
10
+ Usage
11
+ -----
12
+ >>> from pytcl.logging_config import get_logger, timed, TimingContext
13
+ >>> logger = get_logger(__name__)
14
+ >>> logger.debug("Processing measurement batch")
15
+
16
+ >>> @timed(logger, "kf_predict")
17
+ ... def kf_predict(x, P, F, Q):
18
+ ... ...
19
+
20
+ >>> with TimingContext(logger, "update_loop"):
21
+ ... for _ in range(100):
22
+ ... do_update()
23
+ """
24
+
25
+ import functools
26
+ import logging
27
+ import time
28
+ from contextlib import contextmanager
29
+ from typing import Any, Callable, Generator, Optional, TypeVar
30
+
31
# Type variable for decorated functions (preserves the wrapped signature
# through the @timed decorator).
F = TypeVar("F", bound=Callable[..., Any])


# =============================================================================
# Logger Configuration
# =============================================================================

# Root logger name for the pytcl namespace.
PYTCL_LOGGER = "pytcl"

# Sub-loggers for major components.
# NOTE: this mapping is descriptive (logger name -> purpose); it does not
# create loggers — they are created lazily via logging.getLogger().
LOGGER_HIERARCHY = {
    "pytcl.estimation": "Dynamic estimation algorithms (Kalman, IMM, particle)",
    "pytcl.assignment": "Assignment and data association (gating, JPDA, MHT)",
    "pytcl.signal": "Signal processing functions (CFAR, matched filter)",
    "pytcl.coordinate": "Coordinate system operations (rotations, conversions)",
    "pytcl.containers": "Data containers and structures (TrackList, KDTree)",
    "pytcl.math": "Mathematical functions (special functions, transforms)",
    "pytcl.perf": "Performance instrumentation and timing",
}

# Default format strings, keyed by the ``format_style`` argument of
# configure_logging(); unrecognized styles fall back to "simple" there.
FORMATS = {
    "detailed": (
        "%(asctime)s - %(name)s - %(levelname)s - "
        "%(funcName)s:%(lineno)d - %(message)s"
    ),
    "simple": "%(name)s - %(levelname)s - %(message)s",
    "performance": "%(asctime)s - PERF - %(name)s - %(message)s",
    "minimal": "%(levelname)s: %(message)s",
}
63
+
64
+
65
def configure_logging(
    level: int = logging.WARNING,
    format_style: str = "simple",
    handler: Optional[logging.Handler] = None,
) -> logging.Logger:
    """
    Configure the pytcl logging hierarchy.

    Installs a single handler on the root ``pytcl`` logger, replacing any
    handlers from earlier calls, so repeated configuration is idempotent.

    Parameters
    ----------
    level : int
        Logging level (e.g., logging.DEBUG, logging.INFO).
    format_style : str
        One of 'detailed', 'simple', 'performance', 'minimal'.
        Unknown styles fall back to 'simple'.
    handler : logging.Handler, optional
        Custom handler. If None, uses StreamHandler.

    Returns
    -------
    logging.Logger
        The root pytcl logger.

    Examples
    --------
    >>> import logging
    >>> from pytcl.logging_config import configure_logging
    >>> configure_logging(level=logging.DEBUG, format_style="detailed")
    """
    pytcl_root = logging.getLogger(PYTCL_LOGGER)
    pytcl_root.setLevel(level)

    # Remove previously installed handlers so reconfiguring does not
    # duplicate output.
    pytcl_root.handlers.clear()

    # Default to stderr streaming when the caller supplies no handler.
    active = logging.StreamHandler() if handler is None else handler

    active.setFormatter(
        logging.Formatter(FORMATS.get(format_style, FORMATS["simple"]))
    )
    active.setLevel(level)

    pytcl_root.addHandler(active)

    return pytcl_root
112
+
113
+
114
def get_logger(name: str) -> logging.Logger:
    """
    Get a logger in the pytcl hierarchy.

    Parameters
    ----------
    name : str
        Logger name. If it equals 'pytcl' or starts with 'pytcl.',
        it is used as-is. Otherwise, 'pytcl.' is prepended.

    Returns
    -------
    logging.Logger
        Logger instance.

    Examples
    --------
    >>> logger = get_logger("dynamic_estimation.kalman")
    >>> logger.name
    'pytcl.dynamic_estimation.kalman'
    """
    # A plain startswith(PYTCL_LOGGER) check would wrongly accept names such
    # as "pytclx.foo" as already being inside the hierarchy; require either
    # an exact match or the dotted prefix.
    if name != PYTCL_LOGGER and not name.startswith(f"{PYTCL_LOGGER}."):
        name = f"{PYTCL_LOGGER}.{name}"
    return logging.getLogger(name)
138
+
139
+
140
# =============================================================================
# Performance Instrumentation
# =============================================================================

# Performance logger: default destination for @timed, TimingContext, and
# PerformanceTracker when the caller does not supply an explicit logger.
_perf_logger = logging.getLogger(f"{PYTCL_LOGGER}.perf")
146
+
147
+
148
def timed(
    logger: Optional[logging.Logger] = None,
    name: Optional[str] = None,
    level: int = logging.DEBUG,
) -> Callable[[F], F]:
    """
    Decorator that logs the wall-clock duration of each call.

    Parameters
    ----------
    logger : logging.Logger, optional
        Logger to use. Defaults to pytcl.perf.
    name : str, optional
        Name to use in log message. Defaults to function name.
    level : int
        Logging level. Default is DEBUG.

    Returns
    -------
    callable
        Decorated function.

    Examples
    --------
    >>> @timed(logger, "kf_predict")
    ... def kf_predict(x, P, F, Q):
    ...     return do_prediction(x, P, F, Q)
    """

    def decorator(func: F) -> F:
        perf_log = logger or _perf_logger
        label = name or func.__name__

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            t0 = time.perf_counter()
            try:
                value = func(*args, **kwargs)
                ms = (time.perf_counter() - t0) * 1000
                perf_log.log(level, "%s completed in %.3fms", label, ms)
                return value
            except Exception as exc:
                # Log the failure with its elapsed time, then re-raise so
                # callers still see the original exception.
                ms = (time.perf_counter() - t0) * 1000
                perf_log.log(level, "%s failed after %.3fms: %s", label, ms, exc)
                raise

        return wrapper

    return decorator
197
+
198
+
199
@contextmanager
def TimingContext(
    logger: Optional[logging.Logger] = None,
    name: str = "operation",
    level: int = logging.DEBUG,
) -> Generator[dict[str, float], None, None]:
    """
    Context manager for timing code blocks.

    Parameters
    ----------
    logger : logging.Logger, optional
        Logger to use. Defaults to pytcl.perf.
    name : str
        Name for the operation being timed.
    level : int
        Logging level.

    Yields
    ------
    dict
        Dictionary that will contain 'elapsed_ms' after context exits.

    Examples
    --------
    >>> with TimingContext(logger, "update_loop") as timing:
    ...     for _ in range(100):
    ...         do_update()
    >>> print(f"Elapsed: {timing['elapsed_ms']:.2f}ms")
    """
    log = logger or _perf_logger
    timing: dict[str, float] = {"elapsed_ms": 0.0}
    start = time.perf_counter()
    try:
        # Yield type fixed to match behavior: the dict is yielded so the
        # caller can read 'elapsed_ms' after the block exits.
        yield timing
    finally:
        # Record and log duration even when the body raises.
        timing["elapsed_ms"] = (time.perf_counter() - start) * 1000
        log.log(level, "%s completed in %.3fms", name, timing["elapsed_ms"])
237
+
238
+
239
class PerformanceTracker:
    """
    Accumulate timing statistics over many tracked operations.

    Avoids per-iteration log lines: wrap each operation with :meth:`track`
    and emit a single aggregate via :meth:`log_summary`.

    Parameters
    ----------
    name : str
        Name for the tracked operation.
    logger : logging.Logger, optional
        Logger to use. Defaults to pytcl.perf.

    Examples
    --------
    >>> tracker = PerformanceTracker("filter_cycles")
    >>> for _ in range(1000):
    ...     with tracker.track():
    ...         do_filter_step()
    >>> tracker.log_summary()
    """

    def __init__(self, name: str, logger: Optional[logging.Logger] = None):
        self.name = name
        self.logger = logger or _perf_logger
        # Running aggregates; see reset() for the empty state.
        self.count = 0
        self.total_ms = 0.0
        self.min_ms = float("inf")
        self.max_ms = 0.0

    @contextmanager
    def track(self) -> Generator[None, None, None]:
        """Time one operation and fold it into the running statistics."""
        t0 = time.perf_counter()
        try:
            yield
        finally:
            # Update aggregates even if the tracked body raised.
            elapsed_ms = (time.perf_counter() - t0) * 1000
            self.count += 1
            self.total_ms += elapsed_ms
            if elapsed_ms < self.min_ms:
                self.min_ms = elapsed_ms
            if elapsed_ms > self.max_ms:
                self.max_ms = elapsed_ms

    @property
    def mean_ms(self) -> float:
        """Mean execution time in milliseconds (0.0 when nothing tracked)."""
        if self.count == 0:
            return 0.0
        return self.total_ms / self.count

    def log_summary(self, level: int = logging.INFO) -> None:
        """Emit one aggregate log line for all tracked operations."""
        if not self.count:
            self.logger.log(level, "%s: no data", self.name)
            return

        self.logger.log(
            level,
            "%s: count=%d, mean=%.3fms, min=%.3fms, max=%.3fms, total=%.1fms",
            self.name,
            self.count,
            self.mean_ms,
            self.min_ms,
            self.max_ms,
            self.total_ms,
        )

    def reset(self) -> None:
        """Return all statistics to the freshly-constructed state."""
        self.count = 0
        self.total_ms = 0.0
        self.min_ms = float("inf")
        self.max_ms = 0.0

    def __repr__(self) -> str:
        return (
            f"PerformanceTracker(name={self.name!r}, count={self.count}, "
            f"mean_ms={self.mean_ms:.3f})"
        )
317
+
318
+
319
# Public API of this module (controls ``from pytcl.logging_config import *``).
__all__ = [
    "configure_logging",
    "get_logger",
    "timed",
    "TimingContext",
    "PerformanceTracker",
    "PYTCL_LOGGER",
    "LOGGER_HIERARCHY",
    "FORMATS",
]
@@ -25,22 +25,11 @@ Examples
25
25
  >>> coef = create_emm_test_coefficients(n_max=36)
26
26
  """
27
27
 
28
- from pytcl.magnetism.emm import (
29
- EMM_PARAMETERS,
30
- HighResCoefficients,
31
- )
28
+ from pytcl.magnetism.emm import EMM_PARAMETERS, HighResCoefficients
32
29
  from pytcl.magnetism.emm import create_test_coefficients as create_emm_test_coefficients
33
- from pytcl.magnetism.emm import (
34
- emm,
35
- emm_declination,
36
- emm_inclination,
37
- emm_intensity,
38
- )
30
+ from pytcl.magnetism.emm import emm, emm_declination, emm_inclination, emm_intensity
39
31
  from pytcl.magnetism.emm import get_data_dir as get_emm_data_dir
40
- from pytcl.magnetism.emm import (
41
- load_emm_coefficients,
42
- wmmhr,
43
- )
32
+ from pytcl.magnetism.emm import load_emm_coefficients, wmmhr
44
33
  from pytcl.magnetism.igrf import (
45
34
  IGRF13,
46
35
  IGRFModel,
@@ -56,7 +45,10 @@ from pytcl.magnetism.wmm import (
56
45
  WMM2020,
57
46
  MagneticCoefficients,
58
47
  MagneticResult,
48
+ clear_magnetic_cache,
49
+ configure_magnetic_cache,
59
50
  create_wmm2020_coefficients,
51
+ get_magnetic_cache_info,
60
52
  magnetic_declination,
61
53
  magnetic_field_intensity,
62
54
  magnetic_field_spherical,
@@ -77,6 +69,10 @@ __all__ = [
77
69
  "magnetic_declination",
78
70
  "magnetic_inclination",
79
71
  "magnetic_field_intensity",
72
+ # Cache management
73
+ "get_magnetic_cache_info",
74
+ "clear_magnetic_cache",
75
+ "configure_magnetic_cache",
80
76
  # IGRF
81
77
  "IGRF13",
82
78
  "create_igrf13_coefficients",
pytcl/magnetism/emm.py CHANGED
@@ -24,7 +24,7 @@ References
24
24
  import os
25
25
  from functools import lru_cache
26
26
  from pathlib import Path
27
- from typing import Dict, NamedTuple, Optional, Tuple
27
+ from typing import Any, NamedTuple, Optional, Tuple
28
28
 
29
29
  import numpy as np
30
30
  from numpy.typing import NDArray
@@ -32,7 +32,7 @@ from numpy.typing import NDArray
32
32
  from .wmm import MagneticResult
33
33
 
34
34
  # Model parameters
35
- EMM_PARAMETERS: Dict[str, Dict] = {
35
+ EMM_PARAMETERS: dict[str, dict[str, Any]] = {
36
36
  "EMM2017": {
37
37
  "n_max": 790,
38
38
  "epoch": 2017.0,
@@ -119,7 +119,14 @@ def _ensure_data_dir() -> Path:
119
119
  def parse_emm_file(
120
120
  filepath: Path,
121
121
  n_max: Optional[int] = None,
122
- ) -> Tuple[NDArray, NDArray, NDArray, NDArray, float, int]:
122
+ ) -> tuple[
123
+ NDArray[np.floating],
124
+ NDArray[np.floating],
125
+ NDArray[np.floating],
126
+ NDArray[np.floating],
127
+ float,
128
+ int,
129
+ ]:
123
130
  """Parse an EMM/WMMHR coefficient file.
124
131
 
125
132
  The file format is similar to WMM but with more coefficients: