sarpyx 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docs/examples/advanced/batch_processing.py +1 -1
- docs/examples/advanced/custom_processing_chains.py +1 -1
- docs/examples/advanced/performance_optimization.py +1 -1
- docs/examples/basic/snap_integration.py +1 -1
- docs/examples/intermediate/quality_assessment.py +1 -1
- outputs/baseline/20260205-234828/__init__.py +33 -0
- outputs/baseline/20260205-234828/main.py +493 -0
- outputs/final/20260205-234851/__init__.py +33 -0
- outputs/final/20260205-234851/main.py +493 -0
- sarpyx/__init__.py +2 -2
- sarpyx/algorithms/__init__.py +2 -2
- sarpyx/cli/__init__.py +1 -1
- sarpyx/cli/focus.py +3 -5
- sarpyx/cli/main.py +106 -7
- sarpyx/cli/shipdet.py +1 -1
- sarpyx/cli/worldsar.py +549 -0
- sarpyx/processor/__init__.py +1 -1
- sarpyx/processor/core/decode.py +43 -8
- sarpyx/processor/core/focus.py +104 -57
- sarpyx/science/__init__.py +1 -1
- sarpyx/sla/__init__.py +8 -0
- sarpyx/sla/metrics.py +101 -0
- sarpyx/{snap → snapflow}/__init__.py +1 -1
- sarpyx/snapflow/engine.py +6165 -0
- sarpyx/{snap → snapflow}/op.py +0 -1
- sarpyx/utils/__init__.py +1 -1
- sarpyx/utils/geos.py +652 -0
- sarpyx/utils/grid.py +285 -0
- sarpyx/utils/io.py +77 -9
- sarpyx/utils/meta.py +55 -0
- sarpyx/utils/nisar_utils.py +652 -0
- sarpyx/utils/rfigen.py +108 -0
- sarpyx/utils/wkt_utils.py +109 -0
- sarpyx/utils/zarr_utils.py +55 -37
- {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/METADATA +9 -5
- {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/RECORD +41 -32
- {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/WHEEL +1 -1
- sarpyx-0.1.6.dist-info/licenses/LICENSE +201 -0
- sarpyx-0.1.6.dist-info/top_level.txt +4 -0
- tests/test_zarr_compat.py +35 -0
- sarpyx/processor/core/decode_v0.py +0 -0
- sarpyx/processor/core/decode_v1.py +0 -849
- sarpyx/processor/core/focus_old.py +0 -1550
- sarpyx/processor/core/focus_v1.py +0 -1566
- sarpyx/processor/core/focus_v2.py +0 -1625
- sarpyx/snap/engine.py +0 -633
- sarpyx-0.1.5.dist-info/top_level.txt +0 -2
- {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/entry_points.txt +0 -0
|
@@ -1,1550 +0,0 @@
|
|
|
1
|
-
import argparse
|
|
2
|
-
from typing import Dict, Any, Optional, Union, Tuple, Callable, List
|
|
3
|
-
try:
|
|
4
|
-
import torch
|
|
5
|
-
except ImportError:
|
|
6
|
-
print('Unable to import torch module')
|
|
7
|
-
torch = None
|
|
8
|
-
import pickle
|
|
9
|
-
import pandas as pd
|
|
10
|
-
import numpy as np
|
|
11
|
-
from scipy.interpolate import interp1d
|
|
12
|
-
import math
|
|
13
|
-
from pathlib import Path
|
|
14
|
-
import matplotlib.pyplot as plt
|
|
15
|
-
import copy
|
|
16
|
-
import gc
|
|
17
|
-
from functools import wraps
|
|
18
|
-
import psutil
|
|
19
|
-
import time
|
|
20
|
-
from os import environ
|
|
21
|
-
|
|
22
|
-
try:
|
|
23
|
-
import zarr
|
|
24
|
-
import numcodecs
|
|
25
|
-
ZARR_AVAILABLE = True
|
|
26
|
-
except ImportError:
|
|
27
|
-
print('Warning: zarr not available, falling back to pickle for saving')
|
|
28
|
-
ZARR_AVAILABLE = False
|
|
29
|
-
|
|
30
|
-
# ---------- Import custom modules ----------
|
|
31
|
-
from .code2physical import range_dec_to_sample_rate
|
|
32
|
-
from .transforms import perform_fft_custom
|
|
33
|
-
from . import constants as cnst
|
|
34
|
-
from ..utils.viz import dump
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
# ---------- Global settings ----------
|
|
38
|
-
environ['OMP_NUM_THREADS'] = '12' # Set OpenMP threads for parallel processing
|
|
39
|
-
__VTIMING__ = False
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
# ---------- Decorators and utility functions ----------
|
|
44
|
-
def timing_decorator(func: Callable) -> Callable:
|
|
45
|
-
"""Decorator to measure and print function execution time.
|
|
46
|
-
|
|
47
|
-
Args:
|
|
48
|
-
func: The function to measure.
|
|
49
|
-
|
|
50
|
-
Returns:
|
|
51
|
-
The wrapped function with timing measurement.
|
|
52
|
-
"""
|
|
53
|
-
@wraps(func)
|
|
54
|
-
def wrapper(*args, **kwargs):
|
|
55
|
-
start_time = time.time()
|
|
56
|
-
result = func(*args, **kwargs)
|
|
57
|
-
elapsed_time = time.time() - start_time
|
|
58
|
-
if __VTIMING__:
|
|
59
|
-
print(f'Elapsed time for {func.__name__}: {elapsed_time:.4f} seconds')
|
|
60
|
-
else:
|
|
61
|
-
# Only print if __VTIMING__ is enabled
|
|
62
|
-
pass
|
|
63
|
-
return result
|
|
64
|
-
return wrapper
|
|
65
|
-
|
|
66
|
-
def print_memory() -> None:
|
|
67
|
-
"""Print current RAM memory usage percentage."""
|
|
68
|
-
print(f'RAM memory usage: {psutil.virtual_memory().percent}%')
|
|
69
|
-
|
|
70
|
-
def flush_mem(func: Callable) -> Callable:
|
|
71
|
-
"""Decorator for memory-efficient operations with monitoring.
|
|
72
|
-
|
|
73
|
-
Args:
|
|
74
|
-
func: The function to wrap.
|
|
75
|
-
|
|
76
|
-
Returns:
|
|
77
|
-
The wrapped function with memory monitoring and cleanup.
|
|
78
|
-
"""
|
|
79
|
-
@wraps(func)
|
|
80
|
-
def wrapper(*args, **kwargs):
|
|
81
|
-
# Monitor memory before
|
|
82
|
-
initial_memory = psutil.virtual_memory().percent
|
|
83
|
-
|
|
84
|
-
# Execute function
|
|
85
|
-
result = func(*args, **kwargs)
|
|
86
|
-
|
|
87
|
-
# Force garbage collection
|
|
88
|
-
gc.collect()
|
|
89
|
-
|
|
90
|
-
# Monitor memory after
|
|
91
|
-
final_memory = psutil.virtual_memory().percent
|
|
92
|
-
|
|
93
|
-
# Print memory change if verbose
|
|
94
|
-
if hasattr(args[0], '_verbose') and args[0]._verbose:
|
|
95
|
-
print(f'Memory usage: {initial_memory:.1f}% -> {final_memory:.1f}% '
|
|
96
|
-
f'(Δ{final_memory - initial_memory:+.1f}%)')
|
|
97
|
-
|
|
98
|
-
return result
|
|
99
|
-
return wrapper
|
|
100
|
-
|
|
101
|
-
def cleanup_variables(*variables: Any) -> None:
|
|
102
|
-
"""Explicitly delete variables and run garbage collection.
|
|
103
|
-
|
|
104
|
-
Args:
|
|
105
|
-
*variables: Variables to delete.
|
|
106
|
-
"""
|
|
107
|
-
for var in variables:
|
|
108
|
-
del var
|
|
109
|
-
gc.collect()
|
|
110
|
-
|
|
111
|
-
def initialize_params(
|
|
112
|
-
device: Optional[torch.device] = None,
|
|
113
|
-
slant_range_vec: Optional[np.ndarray] = None,
|
|
114
|
-
D: Optional[np.ndarray] = None,
|
|
115
|
-
c: Optional[float] = None,
|
|
116
|
-
len_range_line: Optional[int] = None,
|
|
117
|
-
range_sample_freq: Optional[float] = None,
|
|
118
|
-
wavelength: Optional[float] = None
|
|
119
|
-
) -> Dict[str, Any]:
|
|
120
|
-
"""Initialize processing parameters dictionary.
|
|
121
|
-
|
|
122
|
-
Args:
|
|
123
|
-
device: PyTorch device for computation.
|
|
124
|
-
slant_range_vec: Slant range vector.
|
|
125
|
-
D: Cosine of instantaneous squint angle.
|
|
126
|
-
c: Speed of light.
|
|
127
|
-
len_range_line: Length of range line.
|
|
128
|
-
range_sample_freq: Range sampling frequency.
|
|
129
|
-
wavelength: Radar wavelength.
|
|
130
|
-
|
|
131
|
-
Returns:
|
|
132
|
-
Dictionary containing all parameters.
|
|
133
|
-
"""
|
|
134
|
-
return {key: value for key, value in locals().items()}
|
|
135
|
-
|
|
136
|
-
def multiply_inplace(
|
|
137
|
-
a: Union[np.ndarray, torch.Tensor],
|
|
138
|
-
b: Union[np.ndarray, torch.Tensor]
|
|
139
|
-
) -> Union[np.ndarray, torch.Tensor]:
|
|
140
|
-
"""Multiply two arrays element-wise in-place with broadcasting support.
|
|
141
|
-
|
|
142
|
-
Args:
|
|
143
|
-
a: First array (modified in-place).
|
|
144
|
-
b: Second array.
|
|
145
|
-
|
|
146
|
-
Returns:
|
|
147
|
-
Reference to modified first array.
|
|
148
|
-
|
|
149
|
-
Raises:
|
|
150
|
-
ValueError: If arrays have incompatible shapes for broadcasting.
|
|
151
|
-
"""
|
|
152
|
-
if hasattr(a, 'shape') and hasattr(b, 'shape'):
|
|
153
|
-
# Check if shapes are compatible for broadcasting
|
|
154
|
-
if a.shape != b.shape and b.size != 1 and a.size != 1:
|
|
155
|
-
# For 2D array * 1D array, the 1D array should match one of the 2D dimensions
|
|
156
|
-
if len(a.shape) == 2 and len(b.shape) == 1:
|
|
157
|
-
if b.shape[0] == a.shape[1]:
|
|
158
|
-
# Broadcasting along range dimension - use numpy broadcasting
|
|
159
|
-
pass # NumPy will handle this automatically
|
|
160
|
-
elif b.shape[0] == a.shape[0]:
|
|
161
|
-
# Need to reshape for azimuth dimension broadcasting
|
|
162
|
-
b = b.reshape(-1, 1)
|
|
163
|
-
else:
|
|
164
|
-
raise ValueError(f'1D array length ({b.shape[0]}) does not match either dimension of 2D array {a.shape}')
|
|
165
|
-
|
|
166
|
-
# Perform in-place multiplication
|
|
167
|
-
try:
|
|
168
|
-
if isinstance(a, np.ndarray):
|
|
169
|
-
np.multiply(a, b, out=a)
|
|
170
|
-
else: # torch tensor
|
|
171
|
-
a.mul_(b)
|
|
172
|
-
return a
|
|
173
|
-
except (ValueError, RuntimeError) as e:
|
|
174
|
-
raise ValueError(f'Arrays have incompatible shapes for in-place broadcasting: {a.shape} and {b.shape}. '
|
|
175
|
-
f'Original error: {str(e)}') from e
|
|
176
|
-
|
|
177
|
-
def multiply(
|
|
178
|
-
a: Union[np.ndarray, torch.Tensor],
|
|
179
|
-
b: Union[np.ndarray, torch.Tensor],
|
|
180
|
-
debug: bool = False,
|
|
181
|
-
) -> Union[np.ndarray, torch.Tensor]:
|
|
182
|
-
"""Multiply two arrays element-wise with broadcasting support.
|
|
183
|
-
|
|
184
|
-
Args:
|
|
185
|
-
a: First array.
|
|
186
|
-
b: Second array.
|
|
187
|
-
|
|
188
|
-
Returns:
|
|
189
|
-
Element-wise multiplication result.
|
|
190
|
-
|
|
191
|
-
Raises:
|
|
192
|
-
ValueError: If arrays have incompatible shapes for broadcasting.
|
|
193
|
-
"""
|
|
194
|
-
if hasattr(a, 'shape') and hasattr(b, 'shape'):
|
|
195
|
-
# Check if shapes are compatible for broadcasting
|
|
196
|
-
if a.shape != b.shape and b.size != 1 and a.size != 1:
|
|
197
|
-
# Try to understand the broadcasting scenario
|
|
198
|
-
if debug:
|
|
199
|
-
print(f'Debug: Attempting to multiply arrays with shapes {a.shape} and {b.shape}')
|
|
200
|
-
|
|
201
|
-
# For 2D array * 1D array, the 1D array should match one of the 2D dimensions
|
|
202
|
-
if len(a.shape) == 2 and len(b.shape) == 1:
|
|
203
|
-
if debug:
|
|
204
|
-
if b.shape[0] == a.shape[1]:
|
|
205
|
-
print(f'Debug: Broadcasting 1D array along range dimension (axis=1)')
|
|
206
|
-
elif b.shape[0] == a.shape[0]:
|
|
207
|
-
print(f'Debug: Need to reshape 1D array for azimuth dimension (axis=0)')
|
|
208
|
-
b = b.reshape(-1, 1) # Reshape for broadcasting along azimuth
|
|
209
|
-
else:
|
|
210
|
-
raise ValueError(f'1D array length ({b.shape[0]}) does not match either dimension of 2D array {a.shape}')
|
|
211
|
-
|
|
212
|
-
# Allow broadcasting for compatible shapes
|
|
213
|
-
try:
|
|
214
|
-
result = a * b
|
|
215
|
-
if debug:
|
|
216
|
-
print(f'Debug: Broadcasting successful, result shape: {result.shape}')
|
|
217
|
-
return result
|
|
218
|
-
except (ValueError, RuntimeError) as e:
|
|
219
|
-
print(f'Debug: Broadcasting failed with error: {str(e)}')
|
|
220
|
-
raise ValueError(f'Arrays have incompatible shapes for broadcasting: {a.shape} and {b.shape}. '
|
|
221
|
-
f'Original error: {str(e)}') from e
|
|
222
|
-
|
|
223
|
-
return a * b
|
|
224
|
-
|
|
225
|
-
@flush_mem
|
|
226
|
-
@timing_decorator
|
|
227
|
-
def ifft2d(radar_data: Union[np.ndarray, torch.Tensor], backend: str = 'numpy', verbose: bool = False) -> Union[np.ndarray, torch.Tensor]:
|
|
228
|
-
"""Perform memory-efficient 2D inverse FFT on radar data.
|
|
229
|
-
|
|
230
|
-
Args:
|
|
231
|
-
radar_data: Input radar data array.
|
|
232
|
-
backend: Backend to use ('numpy' or 'torch').
|
|
233
|
-
verbose: Whether to print verbose output.
|
|
234
|
-
|
|
235
|
-
Returns:
|
|
236
|
-
Processed radar data after 2D inverse FFT.
|
|
237
|
-
|
|
238
|
-
Raises:
|
|
239
|
-
ValueError: If backend is not supported.
|
|
240
|
-
"""
|
|
241
|
-
if verbose:
|
|
242
|
-
print('Performing 2D inverse FFT...')
|
|
243
|
-
|
|
244
|
-
# Inverse FFT along azimuth dimension first
|
|
245
|
-
if backend == 'numpy':
|
|
246
|
-
radar_data = np.fft.ifft(radar_data, axis=0)
|
|
247
|
-
elif backend == 'torch':
|
|
248
|
-
radar_data = torch.fft.ifft(radar_data, dim=0)
|
|
249
|
-
else:
|
|
250
|
-
raise ValueError(f'Unsupported backend: {backend}')
|
|
251
|
-
|
|
252
|
-
# Then inverse FFT along range dimension
|
|
253
|
-
if backend == 'numpy':
|
|
254
|
-
radar_data = np.fft.ifftshift(np.fft.ifft(radar_data, axis=1), axes=1)
|
|
255
|
-
elif backend == 'torch':
|
|
256
|
-
radar_data = torch.fft.ifft(radar_data, dim=1)
|
|
257
|
-
radar_data = torch.fft.ifftshift(radar_data, dim=1)
|
|
258
|
-
else:
|
|
259
|
-
raise ValueError(f'Unsupported backend: {backend}')
|
|
260
|
-
|
|
261
|
-
if verbose:
|
|
262
|
-
print(f'2D inverse FFT completed, data shape: {radar_data.shape}')
|
|
263
|
-
print_memory()
|
|
264
|
-
|
|
265
|
-
return radar_data
|
|
266
|
-
|
|
267
|
-
@flush_mem
|
|
268
|
-
@timing_decorator
|
|
269
|
-
def iff_azimuth(
|
|
270
|
-
radar_data: Union[np.ndarray, torch.Tensor],
|
|
271
|
-
backend: str = 'numpy',
|
|
272
|
-
verbose: bool = False
|
|
273
|
-
) -> Union[np.ndarray, torch.Tensor]:
|
|
274
|
-
"""Perform memory-efficient inverse FFT along azimuth dimension.
|
|
275
|
-
|
|
276
|
-
Args:
|
|
277
|
-
radar_data: Input radar data array.
|
|
278
|
-
backend: Backend to use ('numpy' or 'torch').
|
|
279
|
-
verbose: Whether to print verbose output.
|
|
280
|
-
|
|
281
|
-
Returns:
|
|
282
|
-
Processed radar data after inverse FFT along azimuth dimension.
|
|
283
|
-
|
|
284
|
-
Raises:
|
|
285
|
-
ValueError: If backend is not supported.
|
|
286
|
-
"""
|
|
287
|
-
if verbose:
|
|
288
|
-
print('Performing inverse FFT along azimuth dimension...')
|
|
289
|
-
|
|
290
|
-
if backend == 'numpy':
|
|
291
|
-
radar_data = np.fft.ifft(radar_data, axis=0)
|
|
292
|
-
elif backend == 'torch':
|
|
293
|
-
radar_data = torch.fft.ifft(radar_data, dim=0)
|
|
294
|
-
else:
|
|
295
|
-
raise ValueError(f'Unsupported backend: {backend}')
|
|
296
|
-
|
|
297
|
-
if verbose:
|
|
298
|
-
print(f'Inverse FFT along azimuth completed, data shape: {radar_data.shape}')
|
|
299
|
-
print_memory()
|
|
300
|
-
|
|
301
|
-
return radar_data
|
|
302
|
-
|
|
303
|
-
# -------- Processing Class ----------
|
|
304
|
-
class CoarseRDA:
|
|
305
|
-
"""Memory-efficient Coarse Range Doppler Algorithm processor for SAR data.
|
|
306
|
-
|
|
307
|
-
This class implements a memory-optimized coarse Range Doppler Algorithm for processing
|
|
308
|
-
synthetic aperture radar (SAR) data, specifically designed for Sentinel-1 data.
|
|
309
|
-
|
|
310
|
-
The processing pipeline follows these main steps:
|
|
311
|
-
1. Initialization and data loading
|
|
312
|
-
2. Transmission replica generation
|
|
313
|
-
3. 2D FFT transformation
|
|
314
|
-
4. Range compression
|
|
315
|
-
5. Range Cell Migration Correction (RCMC)
|
|
316
|
-
6. Azimuth compression
|
|
317
|
-
7. Final inverse transformations
|
|
318
|
-
|
|
319
|
-
Memory optimizations include:
|
|
320
|
-
- In-place operations where possible
|
|
321
|
-
- Explicit memory cleanup
|
|
322
|
-
- Memory usage monitoring
|
|
323
|
-
- Efficient data type usage
|
|
324
|
-
"""
|
|
325
|
-
|
|
326
|
-
# ==================== INITIALIZATION METHODS ====================
|
|
327
|
-
|
|
328
|
-
def __init__(
|
|
329
|
-
self,
|
|
330
|
-
raw_data: Dict[str, Any],
|
|
331
|
-
verbose: bool = False,
|
|
332
|
-
backend: str = 'numpy',
|
|
333
|
-
memory_efficient: bool = True
|
|
334
|
-
) -> None:
|
|
335
|
-
"""Initialize the CoarseRDA processor.
|
|
336
|
-
|
|
337
|
-
Args:
|
|
338
|
-
raw_data: Dictionary containing 'echo', 'ephemeris', and 'metadata'.
|
|
339
|
-
verbose: Whether to print verbose output.
|
|
340
|
-
backend: Backend to use ('numpy', 'torch', or 'custom').
|
|
341
|
-
memory_efficient: Whether to enable memory-efficient mode.
|
|
342
|
-
|
|
343
|
-
Raises:
|
|
344
|
-
ValueError: If invalid backend is specified.
|
|
345
|
-
AssertionError: If required data is missing.
|
|
346
|
-
"""
|
|
347
|
-
self._validate_inputs(raw_data, backend)
|
|
348
|
-
self._initialize_settings(verbose, backend, memory_efficient)
|
|
349
|
-
self._load_data(raw_data)
|
|
350
|
-
self._setup_backend()
|
|
351
|
-
self._initialize_transmission_replica()
|
|
352
|
-
|
|
353
|
-
def _validate_inputs(self, raw_data: Dict[str, Any], backend: str) -> None:
|
|
354
|
-
"""Validate input parameters.
|
|
355
|
-
|
|
356
|
-
Args:
|
|
357
|
-
raw_data: Dictionary containing radar data.
|
|
358
|
-
backend: Processing backend.
|
|
359
|
-
|
|
360
|
-
Raises:
|
|
361
|
-
AssertionError: If required data is missing.
|
|
362
|
-
ValueError: If invalid backend is specified.
|
|
363
|
-
"""
|
|
364
|
-
assert isinstance(raw_data, dict), 'raw_data must be a dictionary'
|
|
365
|
-
assert 'echo' in raw_data, 'raw_data must contain "echo" key'
|
|
366
|
-
assert 'ephemeris' in raw_data, 'raw_data must contain "ephemeris" key'
|
|
367
|
-
assert 'metadata' in raw_data, 'raw_data must contain "metadata" key'
|
|
368
|
-
|
|
369
|
-
valid_backends = {'numpy', 'torch', 'custom'}
|
|
370
|
-
if backend not in valid_backends:
|
|
371
|
-
raise ValueError(f'Backend must be one of {valid_backends}, got {backend}')
|
|
372
|
-
|
|
373
|
-
def _initialize_settings(self, verbose: bool, backend: str, memory_efficient: bool) -> None:
|
|
374
|
-
"""Initialize processor settings.
|
|
375
|
-
|
|
376
|
-
Args:
|
|
377
|
-
verbose: Whether to print verbose output.
|
|
378
|
-
backend: Processing backend.
|
|
379
|
-
memory_efficient: Whether to enable memory-efficient mode.
|
|
380
|
-
"""
|
|
381
|
-
self._backend = backend
|
|
382
|
-
self._verbose = verbose
|
|
383
|
-
self._memory_efficient = memory_efficient
|
|
384
|
-
|
|
385
|
-
if self._verbose:
|
|
386
|
-
print(f'Memory efficient mode: {"enabled" if memory_efficient else "disabled"}')
|
|
387
|
-
|
|
388
|
-
def _load_data(self, raw_data: Dict[str, Any]) -> None:
|
|
389
|
-
"""Load and preprocess input data with memory optimization.
|
|
390
|
-
|
|
391
|
-
Args:
|
|
392
|
-
raw_data: Dictionary containing radar data.
|
|
393
|
-
"""
|
|
394
|
-
# Use views instead of copies where possible
|
|
395
|
-
self.radar_data = raw_data['echo']
|
|
396
|
-
|
|
397
|
-
# Create a copy for ephemeris as we need to modify it
|
|
398
|
-
self.ephemeris = raw_data['ephemeris'].copy()
|
|
399
|
-
self.ephemeris['time_stamp'] /= 2**24
|
|
400
|
-
|
|
401
|
-
# Use reference for metadata as we don't modify it
|
|
402
|
-
self.metadata = raw_data['metadata']
|
|
403
|
-
|
|
404
|
-
# Initialize dimensions - these should remain constant throughout processing
|
|
405
|
-
self.len_az_line, self.len_range_line = self.radar_data.shape
|
|
406
|
-
|
|
407
|
-
if self._verbose:
|
|
408
|
-
print(f'Loaded radar data with shape: {self.radar_data.shape}')
|
|
409
|
-
print(f'Azimuth lines: {self.len_az_line}, Range lines: {self.len_range_line}')
|
|
410
|
-
print(f'Data type: {self.radar_data.dtype}')
|
|
411
|
-
print(f'Memory usage: {self.radar_data.nbytes / 1024**3:.2f} GB')
|
|
412
|
-
|
|
413
|
-
def _setup_backend(self) -> None:
|
|
414
|
-
"""Set up processing backend and device configuration."""
|
|
415
|
-
if self._backend == 'torch':
|
|
416
|
-
if torch is None:
|
|
417
|
-
raise ImportError('PyTorch is required for torch backend but not available')
|
|
418
|
-
self.device = getattr(
|
|
419
|
-
self.radar_data,
|
|
420
|
-
'device',
|
|
421
|
-
torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
|
422
|
-
)
|
|
423
|
-
if self._verbose:
|
|
424
|
-
print(f'Selected device: {self.device}')
|
|
425
|
-
|
|
426
|
-
def _initialize_transmission_replica(self) -> None:
|
|
427
|
-
"""Initialize transmission replica during setup."""
|
|
428
|
-
self._generate_tx_replica()
|
|
429
|
-
|
|
430
|
-
# ==================== TRANSMISSION REPLICA METHODS ====================
|
|
431
|
-
|
|
432
|
-
@timing_decorator
|
|
433
|
-
def _generate_tx_replica(self) -> None:
|
|
434
|
-
"""Generate transmission replica based on metadata parameters.
|
|
435
|
-
|
|
436
|
-
This method creates the transmission replica used for range compression
|
|
437
|
-
based on the radar system parameters extracted from metadata.
|
|
438
|
-
"""
|
|
439
|
-
if self._verbose:
|
|
440
|
-
print('Generating transmission replica...')
|
|
441
|
-
|
|
442
|
-
# Extract range decimation and calculate sample frequency
|
|
443
|
-
rgdec = int(self.metadata['range_decimation'].unique()[0])
|
|
444
|
-
if self._verbose:
|
|
445
|
-
print(f'Range decimation code: {rgdec}')
|
|
446
|
-
|
|
447
|
-
self.range_sample_freq = range_dec_to_sample_rate(rgdec)
|
|
448
|
-
if self._verbose:
|
|
449
|
-
print(f'Range sample frequency: {self.range_sample_freq:.2f} Hz')
|
|
450
|
-
|
|
451
|
-
# Extract transmission parameters
|
|
452
|
-
tx_params = self._extract_tx_parameters()
|
|
453
|
-
|
|
454
|
-
# Generate replica signal
|
|
455
|
-
self._create_replica_signal(tx_params)
|
|
456
|
-
|
|
457
|
-
if self._verbose:
|
|
458
|
-
print(f'Replica length: {self.replica_len}')
|
|
459
|
-
print('Transmission replica generated successfully!')
|
|
460
|
-
|
|
461
|
-
def _extract_tx_parameters(self) -> Dict[str, float]:
|
|
462
|
-
"""Extract transmission parameters from metadata.
|
|
463
|
-
|
|
464
|
-
Returns:
|
|
465
|
-
Dictionary containing transmission parameters.
|
|
466
|
-
"""
|
|
467
|
-
txpsf = self.metadata['tx_pulse_start_freq'].unique()[0]
|
|
468
|
-
txprr = self.metadata['tx_ramp_rate'].unique()[0]
|
|
469
|
-
txpl = self.metadata['tx_pulse_length'].unique()[0]
|
|
470
|
-
|
|
471
|
-
if self._verbose:
|
|
472
|
-
print(f'TX pulse start frequency: {txpsf:.2f} Hz')
|
|
473
|
-
print(f'TX ramp rate: {txprr:.2f} Hz/s')
|
|
474
|
-
print(f'TX pulse length: {txpl:.6f} s')
|
|
475
|
-
|
|
476
|
-
return {'start_freq': txpsf, 'ramp_rate': txprr, 'pulse_length': txpl}
|
|
477
|
-
|
|
478
|
-
def _create_replica_signal(self, tx_params: Dict[str, float]) -> None:
|
|
479
|
-
"""Create the replica signal from transmission parameters.
|
|
480
|
-
|
|
481
|
-
Args:
|
|
482
|
-
tx_params: Dictionary containing transmission parameters.
|
|
483
|
-
"""
|
|
484
|
-
txpsf = tx_params['start_freq']
|
|
485
|
-
txprr = tx_params['ramp_rate']
|
|
486
|
-
txpl = tx_params['pulse_length']
|
|
487
|
-
|
|
488
|
-
# Generate replica
|
|
489
|
-
self.num_tx_vals = int(txpl * self.range_sample_freq)
|
|
490
|
-
if self._verbose:
|
|
491
|
-
print(f'Number of TX values: {self.num_tx_vals}')
|
|
492
|
-
|
|
493
|
-
tx_replica_time_vals = np.linspace(-txpl/2, txpl/2, num=self.num_tx_vals)
|
|
494
|
-
phi1 = txpsf + txprr * txpl / 2
|
|
495
|
-
phi2 = txprr / 2
|
|
496
|
-
|
|
497
|
-
if self._verbose:
|
|
498
|
-
print(f'Phase parameters - phi1: {phi1:.2f}, phi2: {phi2:.2e}')
|
|
499
|
-
|
|
500
|
-
self.tx_replica = np.exp(
|
|
501
|
-
2j * np.pi * (phi1 * tx_replica_time_vals + phi2 * tx_replica_time_vals**2)
|
|
502
|
-
)
|
|
503
|
-
self.replica_len = len(self.tx_replica)
|
|
504
|
-
|
|
505
|
-
# ==================== FFT METHODS ====================
|
|
506
|
-
|
|
507
|
-
@flush_mem
|
|
508
|
-
@timing_decorator
|
|
509
|
-
def fft2d(self, w_pad: Optional[int] = None, executors: int = 12) -> None:
|
|
510
|
-
"""Perform memory-efficient 2D FFT on radar data in range and azimuth dimensions.
|
|
511
|
-
|
|
512
|
-
Args:
|
|
513
|
-
w_pad: Width padding for range FFT (ignored for dimension preservation).
|
|
514
|
-
executors: Number of executors for custom backend.
|
|
515
|
-
|
|
516
|
-
Raises:
|
|
517
|
-
ValueError: If backend is not supported.
|
|
518
|
-
"""
|
|
519
|
-
if self._verbose:
|
|
520
|
-
print(f'FFT input data shape: {self.radar_data.shape}')
|
|
521
|
-
print_memory()
|
|
522
|
-
|
|
523
|
-
if self._backend == 'numpy':
|
|
524
|
-
self._fft2d_numpy_efficient()
|
|
525
|
-
elif self._backend == 'custom':
|
|
526
|
-
self._fft2d_custom(executors)
|
|
527
|
-
elif self._backend == 'torch':
|
|
528
|
-
self._fft2d_torch_efficient()
|
|
529
|
-
else:
|
|
530
|
-
raise ValueError(f'Backend {self._backend} not supported')
|
|
531
|
-
|
|
532
|
-
# Verify dimensions are preserved
|
|
533
|
-
expected_shape = (self.len_az_line, self.len_range_line)
|
|
534
|
-
if self.radar_data.shape != expected_shape:
|
|
535
|
-
raise RuntimeError(f'FFT changed radar data shape from {expected_shape} to {self.radar_data.shape}')
|
|
536
|
-
|
|
537
|
-
if self._verbose:
|
|
538
|
-
print(f'FFT output data shape: {self.radar_data.shape}')
|
|
539
|
-
print('- FFT performed successfully!')
|
|
540
|
-
print_memory()
|
|
541
|
-
|
|
542
|
-
def _fft2d_numpy_efficient(self) -> None:
|
|
543
|
-
"""Perform memory-efficient 2D FFT using NumPy backend preserving original dimensions.
|
|
544
|
-
|
|
545
|
-
Uses in-place operations and memory cleanup for better efficiency.
|
|
546
|
-
"""
|
|
547
|
-
# Store original shape for verification
|
|
548
|
-
original_shape = self.radar_data.shape
|
|
549
|
-
if self._verbose:
|
|
550
|
-
print(f'Original radar data shape: {original_shape}')
|
|
551
|
-
|
|
552
|
-
# Ensure data is contiguous and maintain original precision
|
|
553
|
-
if not self.radar_data.flags.c_contiguous:
|
|
554
|
-
if self._verbose:
|
|
555
|
-
print('Making data contiguous...')
|
|
556
|
-
self.radar_data = np.ascontiguousarray(self.radar_data)
|
|
557
|
-
|
|
558
|
-
# FFT each range line (axis=1) - EXACT SAME as original
|
|
559
|
-
if self._verbose:
|
|
560
|
-
print(f'Performing FFT along range dimension (axis=1)...')
|
|
561
|
-
|
|
562
|
-
# Use same approach as original - no dtype changes
|
|
563
|
-
self.radar_data = np.fft.fft(self.radar_data, axis=1)
|
|
564
|
-
|
|
565
|
-
if self._verbose:
|
|
566
|
-
print(f'First FFT along range dimension completed, shape: {self.radar_data.shape}')
|
|
567
|
-
print_memory()
|
|
568
|
-
|
|
569
|
-
# FFT each azimuth line (axis=0) with fftshift - EXACT SAME as original
|
|
570
|
-
if self._verbose:
|
|
571
|
-
print(f'Performing FFT along azimuth dimension (axis=0) with fftshift...')
|
|
572
|
-
|
|
573
|
-
# Use same approach as original
|
|
574
|
-
self.radar_data = np.fft.fftshift(np.fft.fft(self.radar_data, axis=0), axes=0)
|
|
575
|
-
|
|
576
|
-
if self._verbose:
|
|
577
|
-
print(f'Second FFT along azimuth dimension completed, shape: {self.radar_data.shape}')
|
|
578
|
-
print_memory()
|
|
579
|
-
|
|
580
|
-
# Verify shape preservation
|
|
581
|
-
assert self.radar_data.shape == original_shape, \
|
|
582
|
-
f'FFT changed shape from {original_shape} to {self.radar_data.shape}'
|
|
583
|
-
|
|
584
|
-
def _fft2d_torch_efficient(self) -> None:
|
|
585
|
-
"""Perform memory-efficient 2D FFT using PyTorch backend preserving dimensions.
|
|
586
|
-
|
|
587
|
-
Uses in-place operations where possible.
|
|
588
|
-
"""
|
|
589
|
-
original_shape = self.radar_data.shape
|
|
590
|
-
|
|
591
|
-
if self._verbose:
|
|
592
|
-
print('Performing memory-efficient PyTorch FFT...')
|
|
593
|
-
print_memory()
|
|
594
|
-
|
|
595
|
-
# FFT each range line (axis=1) - in-place when possible
|
|
596
|
-
if self._memory_efficient:
|
|
597
|
-
temp = torch.fft.fft(self.radar_data, dim=1)
|
|
598
|
-
self.radar_data.copy_(temp)
|
|
599
|
-
del temp
|
|
600
|
-
torch.cuda.empty_cache() if torch.cuda.is_available() else None
|
|
601
|
-
else:
|
|
602
|
-
self.radar_data = torch.fft.fft(self.radar_data, dim=1)
|
|
603
|
-
|
|
604
|
-
# FFT each azimuth line (axis=0) with fftshift
|
|
605
|
-
if self._memory_efficient:
|
|
606
|
-
temp = torch.fft.fft(self.radar_data, dim=0)
|
|
607
|
-
self.radar_data.copy_(temp)
|
|
608
|
-
del temp
|
|
609
|
-
|
|
610
|
-
temp = torch.fft.fftshift(self.radar_data, dim=0)
|
|
611
|
-
self.radar_data.copy_(temp)
|
|
612
|
-
del temp
|
|
613
|
-
torch.cuda.empty_cache() if torch.cuda.is_available() else None
|
|
614
|
-
else:
|
|
615
|
-
self.radar_data = torch.fft.fftshift(
|
|
616
|
-
torch.fft.fft(self.radar_data, dim=0),
|
|
617
|
-
dim=0
|
|
618
|
-
)
|
|
619
|
-
|
|
620
|
-
# Verify shape preservation
|
|
621
|
-
assert self.radar_data.shape == original_shape, \
|
|
622
|
-
f'Torch FFT changed shape from {original_shape} to {self.radar_data.shape}'
|
|
623
|
-
|
|
624
|
-
@flush_mem
|
|
625
|
-
@timing_decorator
|
|
626
|
-
def ifft_range(self) -> None:
|
|
627
|
-
"""Perform memory-efficient inverse FFT along range dimension."""
|
|
628
|
-
if self._backend == 'numpy':
|
|
629
|
-
# Use EXACT SAME approach as original
|
|
630
|
-
self.radar_data = np.fft.ifftshift(np.fft.ifft(self.radar_data, axis=1), axes=1)
|
|
631
|
-
elif self._backend == 'torch':
|
|
632
|
-
self.radar_data = torch.fft.ifft(self.radar_data, dim=1)
|
|
633
|
-
self.radar_data = torch.fft.ifftshift(self.radar_data, dim=1)
|
|
634
|
-
else:
|
|
635
|
-
raise ValueError(f'Unsupported backend: {self._backend}')
|
|
636
|
-
|
|
637
|
-
@flush_mem
|
|
638
|
-
@timing_decorator
|
|
639
|
-
def ifft_azimuth(self) -> None:
|
|
640
|
-
"""Perform memory-efficient inverse FFT along azimuth dimension."""
|
|
641
|
-
if self._backend == 'numpy':
|
|
642
|
-
# Use EXACT SAME approach as original
|
|
643
|
-
self.radar_data = np.fft.ifft(self.radar_data, axis=0)
|
|
644
|
-
elif self._backend == 'torch':
|
|
645
|
-
self.radar_data = torch.fft.ifft(self.radar_data, dim=0)
|
|
646
|
-
else:
|
|
647
|
-
raise ValueError(f'Unsupported backend: {self._backend}')
|
|
648
|
-
|
|
649
|
-
# ==================== FILTER GENERATION METHODS ====================
|
|
650
|
-
|
|
651
|
-
@flush_mem
|
|
652
|
-
@timing_decorator
|
|
653
|
-
def get_range_filter(self, pad_w: int = 0) -> np.ndarray:
|
|
654
|
-
"""Compute memory-efficient range filter for radar data compression.
|
|
655
|
-
|
|
656
|
-
Args:
|
|
657
|
-
pad_w: Width padding (ignored - filter always matches radar data dimensions).
|
|
658
|
-
|
|
659
|
-
Returns:
|
|
660
|
-
Range filter array exactly matching radar data range dimension.
|
|
661
|
-
|
|
662
|
-
Raises:
|
|
663
|
-
AssertionError: If filter dimensions are invalid.
|
|
664
|
-
"""
|
|
665
|
-
# Use exact radar data dimensions - no padding considerations
|
|
666
|
-
current_range_dim = self.radar_data.shape[1]
|
|
667
|
-
|
|
668
|
-
if self._verbose:
|
|
669
|
-
print(f'Creating range filter for radar data shape: {self.radar_data.shape}')
|
|
670
|
-
print(f'Range dimension: {current_range_dim}')
|
|
671
|
-
print(f'TX replica length: {self.num_tx_vals}')
|
|
672
|
-
|
|
673
|
-
# Create range filter with exact radar data range dimension - KEEP ORIGINAL PRECISION
|
|
674
|
-
range_filter = np.zeros(current_range_dim, dtype=complex)
|
|
675
|
-
|
|
676
|
-
# Place replica in center of filter
|
|
677
|
-
if current_range_dim >= self.num_tx_vals:
|
|
678
|
-
index_start = (current_range_dim - self.num_tx_vals) // 2
|
|
679
|
-
index_end = index_start + self.num_tx_vals
|
|
680
|
-
|
|
681
|
-
if self._verbose:
|
|
682
|
-
print(f'Placing replica at indices [{index_start}:{index_end}] in filter of length {current_range_dim}')
|
|
683
|
-
|
|
684
|
-
range_filter[index_start:index_end] = self.tx_replica
|
|
685
|
-
else:
|
|
686
|
-
# If range dimension is smaller than replica, truncate replica
|
|
687
|
-
if self._verbose:
|
|
688
|
-
print(f'⚠️ Range dimension ({current_range_dim}) < replica length ({self.num_tx_vals}), truncating replica')
|
|
689
|
-
|
|
690
|
-
replica_start = (self.num_tx_vals - current_range_dim) // 2
|
|
691
|
-
replica_end = replica_start + current_range_dim
|
|
692
|
-
range_filter[:] = self.tx_replica[replica_start:replica_end]
|
|
693
|
-
|
|
694
|
-
# Apply FFT and conjugate - EXACT SAME as original
|
|
695
|
-
range_filter = np.conjugate(np.fft.fft(range_filter))
|
|
696
|
-
|
|
697
|
-
if self._verbose:
|
|
698
|
-
print(f'Range filter shape: {range_filter.shape}')
|
|
699
|
-
|
|
700
|
-
# Ensure filter exactly matches radar data range dimension
|
|
701
|
-
assert range_filter.shape[0] == current_range_dim, \
|
|
702
|
-
f'Filter shape mismatch: expected {current_range_dim}, got {range_filter.shape[0]}'
|
|
703
|
-
|
|
704
|
-
return range_filter
|
|
705
|
-
|
|
706
|
-
@flush_mem
|
|
707
|
-
@timing_decorator
|
|
708
|
-
def get_rcmc(self) -> np.ndarray:
|
|
709
|
-
"""Calculate memory-efficient Range Cell Migration Correction filter.
|
|
710
|
-
|
|
711
|
-
Returns:
|
|
712
|
-
RCMC filter array matching radar data dimensions.
|
|
713
|
-
"""
|
|
714
|
-
self._compute_effective_velocities()
|
|
715
|
-
|
|
716
|
-
self.wavelength = cnst.TX_WAVELENGTH_M
|
|
717
|
-
|
|
718
|
-
# Generate azimuth frequency values for the entire azimuth line length
|
|
719
|
-
self.az_freq_vals = np.arange(
|
|
720
|
-
-self.az_sample_freq/2,
|
|
721
|
-
self.az_sample_freq/2,
|
|
722
|
-
self.az_sample_freq/self.len_az_line
|
|
723
|
-
)
|
|
724
|
-
|
|
725
|
-
# Ensure we have exactly the right number of frequency values
|
|
726
|
-
if len(self.az_freq_vals) != self.len_az_line:
|
|
727
|
-
self.az_freq_vals = np.linspace(
|
|
728
|
-
-self.az_sample_freq/2,
|
|
729
|
-
self.az_sample_freq/2,
|
|
730
|
-
self.len_az_line,
|
|
731
|
-
endpoint=False
|
|
732
|
-
)
|
|
733
|
-
|
|
734
|
-
if self._verbose:
|
|
735
|
-
print(f'Azimuth frequency values shape: {self.az_freq_vals.shape}')
|
|
736
|
-
print(f'Effective velocities shape: {self.effective_velocities.shape}')
|
|
737
|
-
|
|
738
|
-
# Take mean effective velocity across range for each azimuth line
|
|
739
|
-
# This reduces from (56130, 25724) to (56130,)
|
|
740
|
-
mean_effective_velocities = np.mean(self.effective_velocities, axis=1)
|
|
741
|
-
|
|
742
|
-
if self._verbose:
|
|
743
|
-
print(f'Mean effective velocities shape: {mean_effective_velocities.shape}')
|
|
744
|
-
|
|
745
|
-
# Cosine of instantaneous squint angle
|
|
746
|
-
# Broadcasting: (56130,) with (56130,) -> (56130,)
|
|
747
|
-
self.D = np.sqrt(
|
|
748
|
-
1 - (self.wavelength**2 * self.az_freq_vals**2) /
|
|
749
|
-
(4 * mean_effective_velocities**2)
|
|
750
|
-
)
|
|
751
|
-
|
|
752
|
-
if self._verbose:
|
|
753
|
-
print(f'D (cosine squint angle) shape: {self.D.shape}')
|
|
754
|
-
|
|
755
|
-
# Create RCMC filter with CURRENT radar data dimensions (should be original dimensions)
|
|
756
|
-
current_range_dim = self.radar_data.shape[1]
|
|
757
|
-
|
|
758
|
-
range_freq_vals = np.linspace(
|
|
759
|
-
-self.range_sample_freq/2,
|
|
760
|
-
self.range_sample_freq/2,
|
|
761
|
-
num=current_range_dim
|
|
762
|
-
)
|
|
763
|
-
|
|
764
|
-
if self._verbose:
|
|
765
|
-
print(f'Range frequency values shape: {range_freq_vals.shape}')
|
|
766
|
-
print(f'Current radar data range dimension: {current_range_dim}')
|
|
767
|
-
print(f'Slant range vec shape: {self.slant_range_vec.shape}')
|
|
768
|
-
|
|
769
|
-
# Calculate RCMC shift - use first slant range value for reference
|
|
770
|
-
rcmc_shift = self.slant_range_vec[0] * (1/self.D - 1)
|
|
771
|
-
|
|
772
|
-
if self._verbose:
|
|
773
|
-
print(f'RCMC shift shape: {rcmc_shift.shape}')
|
|
774
|
-
|
|
775
|
-
# IMPORTANT: Use same calculation method as original - no chunked processing
|
|
776
|
-
# Broadcasting for final filter calculation
|
|
777
|
-
range_freq_2d = range_freq_vals[np.newaxis, :] # (1, current_range_dim)
|
|
778
|
-
rcmc_shift_2d = rcmc_shift[:, np.newaxis] # (56130, 1)
|
|
779
|
-
|
|
780
|
-
rcmc_filter = np.exp(4j * np.pi * range_freq_2d * rcmc_shift_2d / self.c)
|
|
781
|
-
|
|
782
|
-
if self._verbose:
|
|
783
|
-
print(f'Final RCMC filter shape: {rcmc_filter.shape}')
|
|
784
|
-
|
|
785
|
-
return rcmc_filter
|
|
786
|
-
|
|
787
|
-
@flush_mem
|
|
788
|
-
@timing_decorator
|
|
789
|
-
def get_azimuth_filter(self) -> np.ndarray:
|
|
790
|
-
"""Calculate memory-efficient azimuth compression filter.
|
|
791
|
-
|
|
792
|
-
Returns:
|
|
793
|
-
Azimuth filter array matching radar data dimensions.
|
|
794
|
-
"""
|
|
795
|
-
if self._verbose:
|
|
796
|
-
print(f'Computing azimuth filter...')
|
|
797
|
-
print(f'Slant range vec shape: {self.slant_range_vec.shape}')
|
|
798
|
-
print(f'D shape: {self.D.shape}')
|
|
799
|
-
print(f'Wavelength: {self.wavelength}')
|
|
800
|
-
|
|
801
|
-
# Use current radar data dimensions (should match original slant range vector)
|
|
802
|
-
current_range_dim = self.radar_data.shape[1]
|
|
803
|
-
|
|
804
|
-
# Ensure slant range vector matches radar data dimensions
|
|
805
|
-
if current_range_dim != len(self.slant_range_vec):
|
|
806
|
-
if self._verbose:
|
|
807
|
-
print(f'⚠️ Warning: Current range dim ({current_range_dim}) != slant range vec length ({len(self.slant_range_vec)})')
|
|
808
|
-
print(f'This should not happen - using original slant range vector')
|
|
809
|
-
current_slant_range_vec = self.slant_range_vec
|
|
810
|
-
else:
|
|
811
|
-
current_slant_range_vec = self.slant_range_vec
|
|
812
|
-
|
|
813
|
-
# IMPORTANT: Use same calculation method as original - no chunked processing
|
|
814
|
-
# Broadcasting for azimuth filter calculation
|
|
815
|
-
slant_range_2d = current_slant_range_vec[np.newaxis, :] # (1, range_dim)
|
|
816
|
-
D_2d = self.D[:, np.newaxis] # (56130, 1)
|
|
817
|
-
|
|
818
|
-
azimuth_filter = np.exp(4j * np.pi * slant_range_2d * D_2d / self.wavelength)
|
|
819
|
-
|
|
820
|
-
if self._verbose:
|
|
821
|
-
print(f'Azimuth filter shape: {azimuth_filter.shape}')
|
|
822
|
-
|
|
823
|
-
return azimuth_filter
|
|
824
|
-
|
|
825
|
-
# ==================== VELOCITY COMPUTATION METHODS ====================
|
|
826
|
-
|
|
827
|
-
@timing_decorator
|
|
828
|
-
def _compute_effective_velocities(self) -> None:
|
|
829
|
-
"""Calculate effective spacecraft velocities for processing.
|
|
830
|
-
|
|
831
|
-
This method computes the effective velocities needed for RCMC and
|
|
832
|
-
azimuth compression by combining spacecraft and ground velocities.
|
|
833
|
-
"""
|
|
834
|
-
# Initialize timing and geometry parameters
|
|
835
|
-
self._initialize_timing_parameters()
|
|
836
|
-
|
|
837
|
-
# Calculate spacecraft velocities and positions
|
|
838
|
-
space_velocities, positions = self._calculate_spacecraft_dynamics()
|
|
839
|
-
|
|
840
|
-
# Compute effective velocities using Earth model
|
|
841
|
-
self._compute_ground_velocities(space_velocities, positions)
|
|
842
|
-
|
|
843
|
-
def _initialize_timing_parameters(self) -> None:
|
|
844
|
-
"""Initialize timing and geometry parameters for velocity computation.
|
|
845
|
-
|
|
846
|
-
Raises:
|
|
847
|
-
KeyError: If required metadata columns are missing.
|
|
848
|
-
ValueError: If metadata values are invalid.
|
|
849
|
-
"""
|
|
850
|
-
self.c = cnst.SPEED_OF_LIGHT_MPS
|
|
851
|
-
|
|
852
|
-
# Check for required columns with case-insensitive matching
|
|
853
|
-
metadata_columns = {col.lower(): col for col in self.metadata.columns}
|
|
854
|
-
|
|
855
|
-
required_mappings = {
|
|
856
|
-
'pri': ['pri', 'pulse_repetition_interval'],
|
|
857
|
-
'rank': ['rank'],
|
|
858
|
-
'swst': ['swst', 'sampling_window_start_time', 'start_time']
|
|
859
|
-
}
|
|
860
|
-
|
|
861
|
-
column_map = {}
|
|
862
|
-
for param, possible_names in required_mappings.items():
|
|
863
|
-
found_column = None
|
|
864
|
-
for name in possible_names:
|
|
865
|
-
if name.lower() in metadata_columns:
|
|
866
|
-
found_column = metadata_columns[name.lower()]
|
|
867
|
-
break
|
|
868
|
-
|
|
869
|
-
if found_column is None:
|
|
870
|
-
available_cols = list(self.metadata.columns)
|
|
871
|
-
raise KeyError(
|
|
872
|
-
f'Could not find column for {param}. Tried: {possible_names}. '
|
|
873
|
-
f'Available columns: {available_cols}'
|
|
874
|
-
)
|
|
875
|
-
column_map[param] = found_column
|
|
876
|
-
|
|
877
|
-
if self._verbose:
|
|
878
|
-
print('Column mapping:')
|
|
879
|
-
for param, col in column_map.items():
|
|
880
|
-
print(f' {param} -> {col}')
|
|
881
|
-
|
|
882
|
-
# Extract parameters with error handling
|
|
883
|
-
try:
|
|
884
|
-
self.pri = self.metadata[column_map['pri']].iloc[0]
|
|
885
|
-
rank = self.metadata[column_map['rank']].iloc[0]
|
|
886
|
-
range_start_time_base = self.metadata[column_map['swst']].iloc[0]
|
|
887
|
-
except (IndexError, TypeError) as e:
|
|
888
|
-
raise ValueError(f'Error extracting metadata values: {str(e)}') from e
|
|
889
|
-
|
|
890
|
-
# Validate values
|
|
891
|
-
assert self.pri > 0, f'PRI must be positive, got {self.pri}'
|
|
892
|
-
assert rank >= 0, f'Rank must be non-negative, got {rank}'
|
|
893
|
-
|
|
894
|
-
if self._verbose:
|
|
895
|
-
print(f'PRI: {self.pri:.6f} s')
|
|
896
|
-
print(f'Rank: {rank}')
|
|
897
|
-
print(f'Base range start time: {range_start_time_base:.6f} s')
|
|
898
|
-
|
|
899
|
-
# Calculate derived parameters
|
|
900
|
-
suppressed_data_time = 320 / (8 * cnst.F_REF)
|
|
901
|
-
range_start_time = range_start_time_base + suppressed_data_time
|
|
902
|
-
|
|
903
|
-
# Sample rates
|
|
904
|
-
range_sample_period = 1 / self.range_sample_freq
|
|
905
|
-
self.az_sample_freq = 1 / self.pri
|
|
906
|
-
|
|
907
|
-
if self._verbose:
|
|
908
|
-
print(f'Range start time: {range_start_time:.6f} s')
|
|
909
|
-
print(f'Azimuth sample frequency: {self.az_sample_freq:.2f} Hz')
|
|
910
|
-
|
|
911
|
-
# Fast time and slant range vectors
|
|
912
|
-
sample_num_along_range_line = np.arange(0, self.len_range_line, 1)
|
|
913
|
-
fast_time_vec = range_start_time + (range_sample_period * sample_num_along_range_line)
|
|
914
|
-
self.slant_range_vec = ((rank * self.pri) + fast_time_vec) * self.c / 2
|
|
915
|
-
|
|
916
|
-
if self._verbose:
|
|
917
|
-
print(f'Slant range vector shape: {self.slant_range_vec.shape}')
|
|
918
|
-
print(f'Slant range min/max: {self.slant_range_vec.min():.2f}/{self.slant_range_vec.max():.2f} m')
|
|
919
|
-
|
|
920
|
-
def _calculate_spacecraft_dynamics(self) -> Tuple[np.ndarray, np.ndarray]:
|
|
921
|
-
"""Calculate spacecraft velocities and positions.
|
|
922
|
-
|
|
923
|
-
Returns:
|
|
924
|
-
Tuple of (space_velocities, positions) arrays.
|
|
925
|
-
|
|
926
|
-
Raises:
|
|
927
|
-
KeyError: If required columns are missing from ephemeris or metadata.
|
|
928
|
-
ValueError: If interpolation fails.
|
|
929
|
-
"""
|
|
930
|
-
if self._verbose:
|
|
931
|
-
print('Calculating spacecraft dynamics...')
|
|
932
|
-
print(f'Ephemeris shape: {self.ephemeris.shape}')
|
|
933
|
-
print(f'Metadata shape: {self.metadata.shape}')
|
|
934
|
-
|
|
935
|
-
# Spacecraft velocity calculations
|
|
936
|
-
ecef_vels = self.ephemeris.apply(
|
|
937
|
-
lambda x: math.sqrt(x['vx']**2 + x['vy']**2 + x['vz']**2),
|
|
938
|
-
axis=1
|
|
939
|
-
)
|
|
940
|
-
|
|
941
|
-
if self._verbose:
|
|
942
|
-
print(f'ECEF velocities shape: {ecef_vels.shape}')
|
|
943
|
-
print(f'ECEF velocities range: {ecef_vels.min():.2f} - {ecef_vels.max():.2f} m/s')
|
|
944
|
-
|
|
945
|
-
# Extract arrays and ensure they are proper numpy arrays
|
|
946
|
-
time_stamps = self.ephemeris['time_stamp'].values
|
|
947
|
-
velocity_values = ecef_vels.values
|
|
948
|
-
x_values = self.ephemeris['x'].values
|
|
949
|
-
y_values = self.ephemeris['y'].values
|
|
950
|
-
z_values = self.ephemeris['z'].values
|
|
951
|
-
|
|
952
|
-
if self._verbose:
|
|
953
|
-
print(f'Time stamps shape: {time_stamps.shape}')
|
|
954
|
-
print(f'Time stamps range: {time_stamps.min():.6f} - {time_stamps.max():.6f}')
|
|
955
|
-
print(f' veocity values shape: {velocity_values.shape}')
|
|
956
|
-
print(f'Position arrays shapes: x={x_values.shape}, y={y_values.shape}, z={z_values.shape}')
|
|
957
|
-
|
|
958
|
-
# Ensure arrays are sorted by time for interpolation
|
|
959
|
-
sort_indices = np.argsort(time_stamps)
|
|
960
|
-
time_stamps = time_stamps[sort_indices]
|
|
961
|
-
velocity_values = velocity_values[sort_indices]
|
|
962
|
-
x_values = x_values[sort_indices]
|
|
963
|
-
y_values = y_values[sort_indices]
|
|
964
|
-
z_values = z_values[sort_indices]
|
|
965
|
-
|
|
966
|
-
# Calculate metadata time stamps
|
|
967
|
-
metadata_times = self.metadata.apply(
|
|
968
|
-
lambda x: x['coarse_time'] + x['fine_time'],
|
|
969
|
-
axis=1
|
|
970
|
-
).values
|
|
971
|
-
|
|
972
|
-
if self._verbose:
|
|
973
|
-
print(f'Metadata times shape: {metadata_times.shape}')
|
|
974
|
-
print(f'Metadata times range: {metadata_times.min():.6f} - {metadata_times.max():.6f}')
|
|
975
|
-
print(f'Ephemeris time range: {time_stamps.min():.6f} - {time_stamps.max():.6f}')
|
|
976
|
-
|
|
977
|
-
# Check if metadata times are within ephemeris time range
|
|
978
|
-
time_within_range = (metadata_times >= time_stamps.min()) & (metadata_times <= time_stamps.max())
|
|
979
|
-
if not np.all(time_within_range):
|
|
980
|
-
out_of_range_count = np.sum(~time_within_range)
|
|
981
|
-
if self._verbose:
|
|
982
|
-
print(f'⚠️ Warning: {out_of_range_count} metadata times are outside ephemeris range')
|
|
983
|
-
print(f' Will use boundary values for extrapolation')
|
|
984
|
-
|
|
985
|
-
# Create interpolation functions with bounds_error=False and fill_value for extrapolation
|
|
986
|
-
try:
|
|
987
|
-
velocity_interp = interp1d(
|
|
988
|
-
time_stamps, velocity_values,
|
|
989
|
-
kind='linear',
|
|
990
|
-
bounds_error=False,
|
|
991
|
-
fill_value=(velocity_values[0], velocity_values[-1])
|
|
992
|
-
)
|
|
993
|
-
x_interp = interp1d(
|
|
994
|
-
time_stamps, x_values,
|
|
995
|
-
kind='linear',
|
|
996
|
-
bounds_error=False,
|
|
997
|
-
fill_value=(x_values[0], x_values[-1])
|
|
998
|
-
)
|
|
999
|
-
y_interp = interp1d(
|
|
1000
|
-
time_stamps, y_values,
|
|
1001
|
-
kind='linear',
|
|
1002
|
-
bounds_error=False,
|
|
1003
|
-
fill_value=(y_values[0], y_values[-1])
|
|
1004
|
-
)
|
|
1005
|
-
z_interp = interp1d(
|
|
1006
|
-
time_stamps, z_values,
|
|
1007
|
-
kind='linear',
|
|
1008
|
-
bounds_error=False,
|
|
1009
|
-
fill_value=(z_values[0], z_values[-1])
|
|
1010
|
-
)
|
|
1011
|
-
except ValueError as e:
|
|
1012
|
-
raise ValueError(f'Failed to create interpolation functions: {str(e)}') from e
|
|
1013
|
-
|
|
1014
|
-
# Interpolate at metadata time points
|
|
1015
|
-
try:
|
|
1016
|
-
space_velocities = velocity_interp(metadata_times)
|
|
1017
|
-
x_interp_vals = x_interp(metadata_times)
|
|
1018
|
-
y_interp_vals = y_interp(metadata_times)
|
|
1019
|
-
z_interp_vals = z_interp(metadata_times)
|
|
1020
|
-
except Exception as e:
|
|
1021
|
-
raise ValueError(f'Interpolation failed: {str(e)}') from e
|
|
1022
|
-
|
|
1023
|
-
# Ensure interpolated values are arrays and handle any remaining NaN values
|
|
1024
|
-
space_velocities = np.asarray(space_velocities)
|
|
1025
|
-
x_interp_vals = np.asarray(x_interp_vals)
|
|
1026
|
-
y_interp_vals = np.asarray(y_interp_vals)
|
|
1027
|
-
z_interp_vals = np.asarray(z_interp_vals)
|
|
1028
|
-
|
|
1029
|
-
# Check for and handle NaN values
|
|
1030
|
-
if np.any(np.isnan(space_velocities)):
|
|
1031
|
-
nan_count = np.sum(np.isnan(space_velocities))
|
|
1032
|
-
if self._verbose:
|
|
1033
|
-
print(f'⚠️ Found {nan_count} NaN values in space_velocities, filling with nearest valid values')
|
|
1034
|
-
|
|
1035
|
-
# Fill NaN values with nearest valid values
|
|
1036
|
-
valid_mask = ~np.isnan(space_velocities)
|
|
1037
|
-
if np.any(valid_mask):
|
|
1038
|
-
# Forward fill
|
|
1039
|
-
space_velocities = pd.Series(space_velocities).fillna(method='ffill').fillna(method='bfill').values
|
|
1040
|
-
else:
|
|
1041
|
-
# If all NaN, use average ephemeris velocity
|
|
1042
|
-
space_velocities.fill(np.nanmean(velocity_values))
|
|
1043
|
-
|
|
1044
|
-
# Handle NaN values in position components
|
|
1045
|
-
for vals, name in [(x_interp_vals, 'x'), (y_interp_vals, 'y'), (z_interp_vals, 'z')]:
|
|
1046
|
-
if np.any(np.isnan(vals)):
|
|
1047
|
-
nan_count = np.sum(np.isnan(vals))
|
|
1048
|
-
if self._verbose:
|
|
1049
|
-
print(f'⚠️ Found {nan_count} NaN values in {name}_interp_vals, filling with nearest valid values')
|
|
1050
|
-
|
|
1051
|
-
valid_mask = ~np.isnan(vals)
|
|
1052
|
-
if np.any(valid_mask):
|
|
1053
|
-
vals_series = pd.Series(vals).fillna(method='ffill').fillna(method='bfill')
|
|
1054
|
-
if name == 'x':
|
|
1055
|
-
x_interp_vals = vals_series.values
|
|
1056
|
-
elif name == 'y':
|
|
1057
|
-
y_interp_vals = vals_series.values
|
|
1058
|
-
else:
|
|
1059
|
-
z_interp_vals = vals_series.values
|
|
1060
|
-
|
|
1061
|
-
if self._verbose:
|
|
1062
|
-
print(f'Interpolated space_velocities shape: {space_velocities.shape}')
|
|
1063
|
-
print(f'Interpolated position component shapes: x={x_interp_vals.shape}, y={y_interp_vals.shape}, z={z_interp_vals.shape}')
|
|
1064
|
-
|
|
1065
|
-
# Create position array
|
|
1066
|
-
positions = np.column_stack([x_interp_vals, y_interp_vals, z_interp_vals])
|
|
1067
|
-
|
|
1068
|
-
if self._verbose:
|
|
1069
|
-
print(f'Final space_velocities shape: {space_velocities.shape}')
|
|
1070
|
-
print(f'Final positions shape: {positions.shape}')
|
|
1071
|
-
print(f'Space velocities range: {space_velocities.min():.2f} - {space_velocities.max():.2f} m/s')
|
|
1072
|
-
print(f'Position range - x: {positions[:, 0].min():.0f} to {positions[:, 0].max():.0f}')
|
|
1073
|
-
print(f'Position range - y: {positions[:, 1].min():.0f} to {positions[:, 1].max():.0f}')
|
|
1074
|
-
print(f'Position range - z: {positions[:, 2].min():.0f} to {positions[:, 2].max():.0f}')
|
|
1075
|
-
|
|
1076
|
-
# Validate outputs
|
|
1077
|
-
assert isinstance(space_velocities, np.ndarray), 'space_velocities must be numpy array'
|
|
1078
|
-
assert isinstance(positions, np.ndarray), 'positions must be numpy array'
|
|
1079
|
-
assert len(space_velocities.shape) == 1, f'space_velocities must be 1D, got shape {space_velocities.shape}'
|
|
1080
|
-
assert len(positions.shape) == 2, f'positions must be 2D array, got shape {positions.shape}'
|
|
1081
|
-
assert positions.shape[1] == 3, f'positions must have 3 columns (x,y,z), got {positions.shape[1]}'
|
|
1082
|
-
assert space_velocities.shape[0] == positions.shape[0], f'velocity and position arrays must have same length'
|
|
1083
|
-
|
|
1084
|
-
# Final check for NaN values after cleaning
|
|
1085
|
-
assert not np.any(np.isnan(space_velocities)), 'NaN values still present in space_velocities after cleaning'
|
|
1086
|
-
assert not np.any(np.isnan(positions)), 'NaN values still present in positions after cleaning'
|
|
1087
|
-
|
|
1088
|
-
# Check for reasonable values
|
|
1089
|
-
assert np.all(space_velocities > 1000), f'Space velocities too low (min: {space_velocities.min():.2f} m/s)'
|
|
1090
|
-
assert np.all(space_velocities < 20000), f'Space velocities too high (max: {space_velocities.max():.2f} m/s)'
|
|
1091
|
-
|
|
1092
|
-
position_magnitudes = np.linalg.norm(positions, axis=1)
|
|
1093
|
-
assert np.all(position_magnitudes > 6e6), f'Position magnitudes too small (min: {position_magnitudes.min():.0f} m)'
|
|
1094
|
-
assert np.all(position_magnitudes < 8e6), f'Position magnitudes too large (max: {position_magnitudes.max():.0f} m)'
|
|
1095
|
-
|
|
1096
|
-
return space_velocities, positions
|
|
1097
|
-
|
|
1098
|
-
def _compute_ground_velocities(self, space_velocities: np.ndarray, positions: np.ndarray) -> None:
|
|
1099
|
-
"""Compute ground velocities and effective velocities.
|
|
1100
|
-
|
|
1101
|
-
Args:
|
|
1102
|
-
space_velocities: Spacecraft velocity magnitudes (1D array).
|
|
1103
|
-
positions: Spacecraft position vectors (2D array, shape [N, 3]).
|
|
1104
|
-
|
|
1105
|
-
Raises:
|
|
1106
|
-
AssertionError: If input arrays have incompatible shapes.
|
|
1107
|
-
ValueError: If calculations produce invalid results.
|
|
1108
|
-
"""
|
|
1109
|
-
# Enhanced input validation
|
|
1110
|
-
assert isinstance(space_velocities, np.ndarray), f'space_velocities must be numpy array, got {type(space_velocities)}'
|
|
1111
|
-
assert isinstance(positions, np.ndarray), f'positions must be numpy array, got {type(positions)}'
|
|
1112
|
-
assert len(positions.shape) == 2, f'positions must be 2D, got shape {positions.shape}'
|
|
1113
|
-
assert positions.shape[1] == 3, f'positions must have 3 columns, got {positions.shape[1]}'
|
|
1114
|
-
assert space_velocities.shape[0] == positions.shape[0], f'Array lengths must match: velocities={space_velocities.shape[0]}, positions={positions.shape[0]}'
|
|
1115
|
-
|
|
1116
|
-
# Ensure arrays are proper numpy arrays with correct dtypes
|
|
1117
|
-
space_velocities = np.asarray(space_velocities, dtype=np.float64)
|
|
1118
|
-
positions = np.asarray(positions, dtype=np.float64)
|
|
1119
|
-
|
|
1120
|
-
# Check for NaN/inf values
|
|
1121
|
-
assert not np.any(np.isnan(space_velocities)), 'NaN values in space_velocities'
|
|
1122
|
-
assert not np.any(np.isnan(positions)), 'NaN values in positions'
|
|
1123
|
-
assert not np.any(np.isinf(space_velocities)), 'Infinite values in space_velocities'
|
|
1124
|
-
assert not np.any(np.isinf(positions)), 'Infinite values in positions'
|
|
1125
|
-
|
|
1126
|
-
if self._verbose:
|
|
1127
|
-
print('Computing ground velocities...')
|
|
1128
|
-
print(f'Space velocities shape: {space_velocities.shape}')
|
|
1129
|
-
print(f'Positions shape: {positions.shape}')
|
|
1130
|
-
print(f'Slant range vec shape: {self.slant_range_vec.shape}')
|
|
1131
|
-
print(f'Input data ranges:')
|
|
1132
|
-
print(f' Space velocities: {space_velocities.min():.2f} - {space_velocities.max():.2f} m/s')
|
|
1133
|
-
print(f' Positions X: {positions[:, 0].min():.0f} - {positions[:, 0].max():.0f} m')
|
|
1134
|
-
print(f' Positions Y: {positions[:, 1].min():.0f} - {positions[:, 1].max():.0f} m')
|
|
1135
|
-
print(f' Positions Z: {positions[:, 2].min():.0f} - {positions[:, 2].max():.0f} m')
|
|
1136
|
-
|
|
1137
|
-
# Earth model calculations
|
|
1138
|
-
        a = float(cnst.WGS84_SEMI_MAJOR_AXIS_M)
        b = float(cnst.WGS84_SEMI_MINOR_AXIS_M)

        if self._verbose:
            print(f'Earth model parameters: a={a:.0f} m, b={b:.0f} m')

        # Calculate spacecraft heights (magnitudes of position vectors)
        H = np.linalg.norm(positions, axis=1)  # axis=1 for row-wise norm
        H = np.asarray(H, dtype=np.float64)

        # Validate H calculation
        assert H.shape == space_velocities.shape, f'H shape {H.shape} must match velocities shape {space_velocities.shape}'
        assert not np.any(np.isnan(H)), 'NaN values in H (spacecraft heights)'
        assert np.all(H > 0), 'All spacecraft heights must be positive'

        W = space_velocities / H
        W = np.asarray(W, dtype=np.float64)

        # Calculate latitude using more robust method
        xy_distance = np.sqrt(positions[:, 0]**2 + positions[:, 1]**2)
        xy_distance = np.asarray(xy_distance, dtype=np.float64)
        lat = np.arctan2(positions[:, 2], xy_distance)
        lat = np.asarray(lat, dtype=np.float64)

        if self._verbose:
            print(f'H (heights) shape: {H.shape}, range: {H.min():.0f} - {H.max():.0f} m')
            print(f'W (angular velocities) shape: {W.shape}, range: {W.min():.6f} - {W.max():.6f} rad/s')
            print(f'Latitudes range: {np.degrees(lat.min()):.2f} - {np.degrees(lat.max()):.2f} deg')

        # Validate intermediate calculations
        assert not np.any(np.isnan(W)), 'NaN values in W (angular velocities)'
        assert not np.any(np.isnan(lat)), 'NaN values in latitude'

        # Local Earth radius calculation with explicit numpy array operations
        cos_lat = np.cos(lat)
        sin_lat = np.sin(lat)
        cos_lat = np.asarray(cos_lat, dtype=np.float64)
        sin_lat = np.asarray(sin_lat, dtype=np.float64)

        # Ensure all terms are numpy arrays before sqrt operation
        numerator = np.asarray(a**4 * cos_lat**2 + b**4 * sin_lat**2, dtype=np.float64)
        denominator = np.asarray(a**2 * cos_lat**2 + b**2 * sin_lat**2, dtype=np.float64)
        ratio = numerator / denominator
        ratio = np.asarray(ratio, dtype=np.float64)

        local_earth_rad = np.sqrt(ratio)
        local_earth_rad = np.asarray(local_earth_rad, dtype=np.float64)

        if self._verbose:
            print(f'Local Earth radius range: {local_earth_rad.min():.0f} - {local_earth_rad.max():.0f} m')

        # Validate local Earth radius
        assert not np.any(np.isnan(local_earth_rad)), 'NaN values in local_earth_rad'
        assert np.all(local_earth_rad > 0), 'All local Earth radii must be positive'

        # Ensure slant_range_vec is also a proper numpy array
        slant_range_vec = np.asarray(self.slant_range_vec, dtype=np.float64)

        # Broadcasting for slant range calculation
        slant_range_2d = slant_range_vec[np.newaxis, :]  # Shape: [1, range_samples]
        local_earth_rad_2d = local_earth_rad[:, np.newaxis]  # Shape: [azimuth_samples, 1]
        H_2d = H[:, np.newaxis]  # Shape: [azimuth_samples, 1]
        W_2d = W[:, np.newaxis]  # Shape: [azimuth_samples, 1]

        # Calculate cosine of look angle with explicit array operations
        term1 = np.asarray(local_earth_rad_2d**2, dtype=np.float64)
        term2 = np.asarray(H_2d**2, dtype=np.float64)
        term3 = np.asarray(slant_range_2d**2, dtype=np.float64)
        term4 = np.asarray(2 * local_earth_rad_2d * H_2d, dtype=np.float64)

        cos_beta = (term1 + term2 - term3) / term4
        cos_beta = np.asarray(cos_beta, dtype=np.float64)

        # Clip to valid range for cosine
        cos_beta = np.clip(cos_beta, -1.0, 1.0)

        if self._verbose:
            print(f'cos_beta shape: {cos_beta.shape}')
            print(f'cos_beta range: {cos_beta.min():.3f} - {cos_beta.max():.3f}')

        # Calculate ground velocities
        ground_velocities = local_earth_rad_2d * W_2d * cos_beta
        ground_velocities = np.asarray(ground_velocities, dtype=np.float64)

        if self._verbose:
            print(f'Ground velocities shape: {ground_velocities.shape}')

        # Calculate effective velocities
        space_velocities_2d = space_velocities[:, np.newaxis]  # Shape: [azimuth_samples, 1]
        effective_vel_product = space_velocities_2d * ground_velocities
        effective_vel_product = np.asarray(effective_vel_product, dtype=np.float64)

        # Ensure non-negative values before sqrt
        effective_vel_product = np.maximum(effective_vel_product, 0.0)

        self.effective_velocities = np.sqrt(effective_vel_product)
        self.effective_velocities = np.asarray(self.effective_velocities, dtype=np.float64)

        if self._verbose:
            print(f'Effective velocities shape: {self.effective_velocities.shape}')
            print(f'Effective velocities range: {self.effective_velocities.min():.2f} - {self.effective_velocities.max():.2f} m/s')

        # Final validation
        assert not np.any(np.isnan(self.effective_velocities)), 'NaN values in effective velocities'
        assert not np.any(np.isinf(self.effective_velocities)), 'Infinite values in effective velocities'
        assert np.all(self.effective_velocities >= 0), 'All effective velocities must be non-negative'
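For reference, the calculation above reduces to a short geometric chain: the local Earth radius follows the WGS84 ellipse, the cosine of the look angle comes from the law of cosines in the Earth-centre / satellite / target triangle, the ground velocity is the projected angular motion, and the effective velocity is the geometric mean of spacecraft and ground velocity. A minimal standalone sketch of that chain follows; the function name, argument layout, and default ellipsoid values are illustrative assumptions, not part of sarpyx.

import numpy as np

def effective_velocity_sketch(positions, speeds, slant_range,
                              a=6378137.0, b=6356752.314245):
    """Illustrative condensation of the effective-velocity geometry above.

    positions: (N, 3) ECEF orbit positions [m]; speeds: (N,) spacecraft speeds [m/s];
    slant_range: (M,) slant ranges [m]. Returns an (N, M) effective-velocity grid.
    """
    H = np.linalg.norm(positions, axis=1)                              # orbital radius per line
    lat = np.arctan2(positions[:, 2], np.hypot(positions[:, 0], positions[:, 1]))
    re = np.sqrt((a**4 * np.cos(lat)**2 + b**4 * np.sin(lat)**2) /
                 (a**2 * np.cos(lat)**2 + b**2 * np.sin(lat)**2))      # local Earth radius
    cos_beta = (re[:, None]**2 + H[:, None]**2 - slant_range[None, :]**2) \
               / (2.0 * re[:, None] * H[:, None])                      # law of cosines
    v_ground = re[:, None] * (speeds / H)[:, None] * np.clip(cos_beta, -1.0, 1.0)
    return np.sqrt(np.maximum(speeds[:, None] * v_ground, 0.0))        # sqrt(Vs * Vg)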
    # ==================== MAIN PROCESSING METHODS ====================

    @flush_mem
    @timing_decorator
    def data_focus(self) -> None:
        """Perform memory-efficient complete SAR data focusing using Range Doppler Algorithm.

        This method orchestrates the complete SAR focusing process by calling
        four main processing steps in sequence.

        Raises:
            RuntimeError: If data dimensions change unexpectedly during processing.
        """
        if self._verbose:
            print('Starting memory-efficient SAR data focusing...')
            print(f'Initial radar data shape: {self.radar_data.shape}')
            print_memory()

        # Store initial shape for verification
        initial_shape = self.radar_data.shape
        expected_shape = (self.len_az_line, self.len_range_line)

        assert initial_shape == expected_shape, \
            f'Initial data shape {initial_shape} does not match expected {expected_shape}'

        self.raw_data = copy.deepcopy(self.radar_data)
        if self._verbose:
            print(f'Raw radar data shape: {self.raw_data.shape}')
            print_memory()
        # ------------------------------------------------------------------------
        # Step 1: 2D FFT transformation (preserves dimensions)
        self.fft2d()
        assert self.radar_data.shape == initial_shape, \
            f'FFT changed data shape from {initial_shape} to {self.radar_data.shape}'
        # ------------------------------------------------------------------------

        # Step 2: Range compression
        self.range_compression()
        self.range_compressed_data = ifft2d(copy.deepcopy(self.radar_data))
        if self._verbose:
            print(f'Range compressed data shape: {self.radar_data.shape}')
            print_memory()
        # ------------------------------------------------------------------------

        # Step 3: Range Cell Migration Correction
        self.rcmc()
        self.rcmc_data = iff_azimuth(copy.deepcopy(self.radar_data))
        if self._verbose:
            print(f'RCMC data shape: {self.radar_data.shape}')
            print_memory()
        # ------------------------------------------------------------------------
        # Step 4: Azimuth compression
        self.azimuth_compression()
        self.azimuth_compressed_data = self.radar_data
        if self._verbose:
            print(f'SAR data focusing completed successfully!')
            print(f'Final radar data shape: {self.radar_data.shape}')
            print_memory()

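The sequence above is the classical Range Doppler Algorithm ordering: forward FFTs, a range matched filter, an RCMC correction, then an azimuth matched filter, each followed by the appropriate inverse transform. A minimal sketch of that ordering on a dummy array, with identity filters standing in for the ones built by get_range_filter, get_rcmc and get_azimuth_filter, is shown below.

import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((64, 128)) + 1j * rng.standard_normal((64, 128))  # azimuth x range

rg_filter = np.ones_like(data)     # placeholder for the range matched filter
rcmc_filter = np.ones_like(data)   # placeholder for the RCMC phase correction
az_filter = np.ones_like(data)     # placeholder for the azimuth matched filter

spectrum = np.fft.fft2(data)                             # Step 1: 2D FFT
spectrum = spectrum * rg_filter                          # Step 2: range compression
spectrum = np.fft.ifft(spectrum * rcmc_filter, axis=1)   # Step 3: RCMC + inverse range FFT
image = np.fft.ifft(spectrum * az_filter, axis=0)        # Step 4: azimuth compression + inverse azimuth FFT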
    @flush_mem
    @timing_decorator
    def range_compression(self) -> None:
        """Perform memory-efficient range compression step.

        This method applies the range compression filter to compress the radar
        signal in the range dimension while preserving data dimensions.

        Raises:
            RuntimeError: If data dimensions change unexpectedly during processing.
        """
        if self._verbose:
            print('Starting range compression...')
            print(f'Input radar data shape: {self.radar_data.shape}')
            print_memory()

        # Store initial shape for verification
        initial_shape = self.radar_data.shape

        # Legacy compatibility parameters
        w_pad = 0
        original_w = initial_shape[1]

        if self._verbose:
            print(f'Processing with original_w={original_w}')

        # Perform range compression
        self._perform_range_compression_efficient(w_pad, original_w)

        # Verify dimensions are preserved
        assert self.radar_data.shape == initial_shape, \
            f'Range compression changed data shape from {initial_shape} to {self.radar_data.shape}'

        if self._verbose:
            print(f'Range compression completed successfully!')
            print(f'Output radar data shape: {self.radar_data.shape}')
            print_memory()

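Range compression itself is a frequency-domain matched filter against the transmitted chirp replica. A self-contained illustration with a synthetic linear-FM pulse follows; the numerical values are arbitrary demonstration settings, not Sentinel-1 parameters.

import numpy as np

fs, pulse_len, chirp_rate = 100e6, 10e-6, 2.0e12      # sample rate, pulse length, FM rate (arbitrary)
n = int(fs * pulse_len)
t = np.arange(n) / fs
replica = np.exp(1j * np.pi * chirp_rate * t**2)      # transmitted chirp replica

echo = np.zeros(4 * n, dtype=complex)
echo[n:2 * n] = replica                               # one point target delayed by n samples

matched = np.conj(np.fft.fft(replica, echo.size))     # matched filter = conjugate replica spectrum
compressed = np.fft.ifft(np.fft.fft(echo) * matched)
assert np.argmax(np.abs(compressed)) == n             # energy collapses to the target delay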
    @flush_mem
    @timing_decorator
    def rcmc(self) -> None:
        """Perform memory-efficient Range Cell Migration Correction.

        This method applies the RCMC filter to correct for range cell migration
        effects and performs inverse FFT in the range dimension.

        Raises:
            RuntimeError: If data dimensions change unexpectedly during processing.
        """
        if self._verbose:
            print('Starting Range Cell Migration Correction...')
            print(f'Input radar data shape: {self.radar_data.shape}')
            print_memory()

        # Store initial shape for verification
        initial_shape = self.radar_data.shape

        # Perform RCMC
        self._perform_rcmc_efficient()

        # Verify dimensions are preserved
        assert self.radar_data.shape == initial_shape, \
            f'RCMC changed data shape from {initial_shape} to {self.radar_data.shape}'

        if self._verbose:
            print(f'RCMC completed successfully!')
            print(f'Output radar data shape: {self.radar_data.shape}')
            print_memory()

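Because the correction is applied while the data are still in the two-dimensional frequency domain, range cell migration becomes a phase ramp in range frequency whose slope grows quadratically with azimuth frequency. The sketch below shows the textbook linear-RCMC term under illustrative C-band values; the exact filter returned by get_rcmc may differ in convention and windowing.

import numpy as np

c = 299792458.0
wavelength, v_eff, r0 = 0.0555, 7100.0, 850e3        # illustrative C-band geometry
n_az, n_rg, prf, fs = 1024, 2048, 1700.0, 60e6       # illustrative grid and sampling

f_eta = np.fft.fftfreq(n_az, d=1.0 / prf)[:, None]   # azimuth (Doppler) frequencies [Hz]
f_tau = np.fft.fftfreq(n_rg, d=1.0 / fs)[None, :]    # range frequencies [Hz]

delta_r = wavelength**2 * r0 * f_eta**2 / (8.0 * v_eff**2)   # migration to remove [m]
rcmc_filter = np.exp(4j * np.pi * f_tau * delta_r / c)       # phase ramp acts as a range shift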
    @flush_mem
    @timing_decorator
    def azimuth_compression(self) -> None:
        """Perform memory-efficient azimuth compression step.

        This method applies the azimuth compression filter to focus the radar
        signal in the azimuth dimension and performs inverse FFT in azimuth.

        Raises:
            RuntimeError: If data dimensions change unexpectedly during processing.
        """
        if self._verbose:
            print('Starting azimuth compression...')
            print(f'Input radar data shape: {self.radar_data.shape}')
            print_memory()

        # Store initial shape for verification
        initial_shape = self.radar_data.shape

        # Perform azimuth compression
        self._perform_azimuth_compression_efficient()

        # Verify dimensions are preserved
        assert self.radar_data.shape == initial_shape, \
            f'Azimuth compression changed data shape from {initial_shape} to {self.radar_data.shape}'

        if self._verbose:
            print(f'Azimuth compression completed successfully!')
            print(f'Output radar data shape: {self.radar_data.shape}')
            print_memory()

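The azimuth filter is the matched filter for the hyperbolic azimuth phase history, and its chirp rate is set by the effective velocities computed earlier. A hedged sketch of the textbook form follows; the sign convention and any weighting applied by get_azimuth_filter may differ.

import numpy as np

wavelength, prf, n_az = 0.0555, 1700.0, 1024           # illustrative values
v_eff, r0 = 7100.0, 850e3                              # effective velocity and closest slant range

ka = 2.0 * v_eff**2 / (wavelength * r0)                # azimuth FM (Doppler) rate [Hz/s]
f_eta = np.fft.fftfreq(n_az, d=1.0 / prf)              # azimuth frequency axis [Hz]
azimuth_filter = np.exp(1j * np.pi * f_eta**2 / ka)    # matched filter applied in the Doppler domain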
    def _perform_range_compression_efficient(self, w_pad: int, original_w: int) -> None:
        """Perform memory-efficient range compression step while preserving data dimensions.

        Args:
            w_pad: Width padding (ignored - dimensions preserved).
            original_w: Original width (for verification).

        Raises:
            ValueError: If array shapes are incompatible.
            AssertionError: If dimensions change unexpectedly.
        """
        if self._verbose:
            print(f'Starting memory-efficient range compression...')
            print(f'Radar data shape: {self.radar_data.shape}')
            print_memory()

        # Store original shape for verification
        original_shape = self.radar_data.shape
        expected_shape = (self.len_az_line, self.len_range_line)

        # Verify we still have expected dimensions
        assert original_shape == expected_shape, \
            f'Unexpected radar data shape: {original_shape}, expected: {expected_shape}'

        # Get range filter with matching dimensions
        range_filter = self.get_range_filter()

        if self._verbose:
            print(f'Range filter shape: {range_filter.shape}')
            print(f'Applying range compression filter...')

        # Apply range compression filter - USE SAME METHOD AS ORIGINAL
        self.radar_data = multiply(self.radar_data, range_filter)

        # Cleanup filter
        cleanup_variables(range_filter)

        # Verify dimensions are preserved
        assert self.radar_data.shape == original_shape, \
            f'Range compression changed data shape from {original_shape} to {self.radar_data.shape}'

        if self._verbose:
            print(f'Range compression completed. Data shape: {self.radar_data.shape}')
            print_memory()

    def _perform_rcmc_efficient(self) -> None:
        """Perform memory-efficient Range Cell Migration Correction."""
        if self._verbose:
            print('Starting memory-efficient RCMC...')
            print_memory()

        rcmc_filter = self.get_rcmc()

        # Use SAME METHOD AS ORIGINAL
        self.radar_data = multiply(self.radar_data, rcmc_filter)

        # Cleanup filter
        cleanup_variables(rcmc_filter)

        # Inverse FFT in range
        self.ifft_range()

        if self._verbose:
            print('RCMC completed.')
            print_memory()

    def _perform_azimuth_compression_efficient(self) -> None:
        """Perform memory-efficient azimuth compression step.

        Raises:
            ValueError: If array shapes are incompatible.
        """
        if self._verbose:
            print('Starting memory-efficient azimuth compression...')
            print(f'Radar data shape before azimuth filter: {self.radar_data.shape}')
            print_memory()

        # Get azimuth filter
        azimuth_filter = self.get_azimuth_filter()

        if self._verbose:
            print(f'Azimuth filter shape: {azimuth_filter.shape}')

        # Apply azimuth compression - USE SAME METHOD AS ORIGINAL
        self.radar_data = multiply(self.radar_data, azimuth_filter)

        # Cleanup filter
        cleanup_variables(azimuth_filter)

        if self._verbose:
            print(f'Radar data shape after azimuth compression: {self.radar_data.shape}')

        # Inverse FFT in azimuth
        self.ifft_azimuth()

        if self._verbose:
            print(f'Final radar data shape: {self.radar_data.shape}')
            print_memory()

    # ==================== UTILITY METHODS ====================

    @timing_decorator
    def save_file(self, save_path: Union[str, Path]) -> None:
        """Save processed radar data to file.

        Args:
            save_path: Path where to save the data.
        """
        dump(self.radar_data, save_path)
        if self._verbose:
            print(f'Data saved to {save_path}')

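save_file delegates to a dump helper defined elsewhere in the module. Purely as an illustration of the intended behaviour, a zarr store when the library is available and pickle otherwise, a hypothetical helper could look like the sketch below; it is not the actual sarpyx implementation.

import pickle
from pathlib import Path

def dump_sketch(array, save_path):
    """Hypothetical save helper: zarr store if available, pickle fallback."""
    save_path = Path(save_path)
    try:
        import zarr
        zarr.save(str(save_path.with_suffix('.zarr')), array)       # chunked, compressed store
    except ImportError:
        with open(save_path.with_suffix('.pkl'), 'wb') as handle:
            pickle.dump(array, handle)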
    # ==================== EEEZY ====================
    # For backward compatibility - keep original method name as alias
    _prompt_tx_replica = _generate_tx_replica

    # ==================== GRAPHIC METHODS ====================
    def _display_slice(self, slice=(0, 4000, 0, 4000),
                       vmin=0, vmax=1000,
                       figsize=(12, 12),
                       title=None,
                       step: str = 'az_compressed') -> None:
        """Display a slice of the radar data for visualization."""

        assert step in ['raw', 'range_compressed', 'rcmc', 'az_compressed'], \
            'Invalid step. Choose from "raw", "range_compressed", "rcmc", or "az_compressed".'

        if step == 'raw':
            data = self.raw_data
        elif step == 'range_compressed':
            data = self.range_compressed_data
        elif step == 'rcmc':
            data = self.rcmc_data
        elif step == 'az_compressed':
            data = self.radar_data
        else:
            raise ValueError(f'Invalid step: {step}. Choose from "raw", "range_compressed", "rcmc", or "az_compressed".')

        plt.figure(figsize=figsize)
        plt.imshow(np.abs(data[slice[0]:slice[1], slice[2]:slice[3]]), vmin=vmin, vmax=vmax, cmap='viridis')
        plt.axis('off')
        if title:
            plt.title(title)
        plt.show()