sarpyx 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. docs/examples/advanced/batch_processing.py +1 -1
  2. docs/examples/advanced/custom_processing_chains.py +1 -1
  3. docs/examples/advanced/performance_optimization.py +1 -1
  4. docs/examples/basic/snap_integration.py +1 -1
  5. docs/examples/intermediate/quality_assessment.py +1 -1
  6. outputs/baseline/20260205-234828/__init__.py +33 -0
  7. outputs/baseline/20260205-234828/main.py +493 -0
  8. outputs/final/20260205-234851/__init__.py +33 -0
  9. outputs/final/20260205-234851/main.py +493 -0
  10. sarpyx/__init__.py +2 -2
  11. sarpyx/algorithms/__init__.py +2 -2
  12. sarpyx/cli/__init__.py +1 -1
  13. sarpyx/cli/focus.py +3 -5
  14. sarpyx/cli/main.py +106 -7
  15. sarpyx/cli/shipdet.py +1 -1
  16. sarpyx/cli/worldsar.py +549 -0
  17. sarpyx/processor/__init__.py +1 -1
  18. sarpyx/processor/core/decode.py +43 -8
  19. sarpyx/processor/core/focus.py +104 -57
  20. sarpyx/science/__init__.py +1 -1
  21. sarpyx/sla/__init__.py +8 -0
  22. sarpyx/sla/metrics.py +101 -0
  23. sarpyx/{snap → snapflow}/__init__.py +1 -1
  24. sarpyx/snapflow/engine.py +6165 -0
  25. sarpyx/{snap → snapflow}/op.py +0 -1
  26. sarpyx/utils/__init__.py +1 -1
  27. sarpyx/utils/geos.py +652 -0
  28. sarpyx/utils/grid.py +285 -0
  29. sarpyx/utils/io.py +77 -9
  30. sarpyx/utils/meta.py +55 -0
  31. sarpyx/utils/nisar_utils.py +652 -0
  32. sarpyx/utils/rfigen.py +108 -0
  33. sarpyx/utils/wkt_utils.py +109 -0
  34. sarpyx/utils/zarr_utils.py +55 -37
  35. {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/METADATA +9 -5
  36. {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/RECORD +41 -32
  37. {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/WHEEL +1 -1
  38. sarpyx-0.1.6.dist-info/licenses/LICENSE +201 -0
  39. sarpyx-0.1.6.dist-info/top_level.txt +4 -0
  40. tests/test_zarr_compat.py +35 -0
  41. sarpyx/processor/core/decode_v0.py +0 -0
  42. sarpyx/processor/core/decode_v1.py +0 -849
  43. sarpyx/processor/core/focus_old.py +0 -1550
  44. sarpyx/processor/core/focus_v1.py +0 -1566
  45. sarpyx/processor/core/focus_v2.py +0 -1625
  46. sarpyx/snap/engine.py +0 -633
  47. sarpyx-0.1.5.dist-info/top_level.txt +0 -2
  48. {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/entry_points.txt +0 -0
@@ -1,1625 +0,0 @@
1
- import argparse
2
- from typing import Dict, Any, Optional, Union, Tuple, Callable, List
3
- try:
4
- import torch
5
- except ImportError:
6
- print('Unable to import torch module')
7
- torch = None
8
- import pickle
9
- import pandas as pd
10
- import numpy as np
11
- from scipy.interpolate import interp1d
12
- import math
13
- from pathlib import Path
14
- import matplotlib.pyplot as plt
15
- import copy
16
- import gc
17
- from functools import wraps
18
- import psutil
19
- import time
20
- from os import environ
21
-
22
- try:
23
- import zarr
24
- import numcodecs
25
- ZARR_AVAILABLE = True
26
- except ImportError:
27
- print('Warning: zarr not available, falling back to pickle for saving')
28
- ZARR_AVAILABLE = False
29
-
30
- # ---------- Import custom modules ----------
31
- from .code2physical import range_dec_to_sample_rate
32
- from .transforms import perform_fft_custom
33
- from . import constants as cnst
34
- from ..utils.viz import dump
35
-
36
-
37
- # ---------- Global settings ----------
38
- environ['OMP_NUM_THREADS'] = '12' # Set OpenMP threads for parallel processing
39
- __VTIMING__ = False
40
-
41
-
42
-
43
- # ---------- Decorators and utility functions ----------
44
- def timing_decorator(func: Callable) -> Callable:
45
- """Decorator to measure and print function execution time.
46
-
47
- Args:
48
- func: The function to measure.
49
-
50
- Returns:
51
- The wrapped function with timing measurement.
52
- """
53
- @wraps(func)
54
- def wrapper(*args, **kwargs):
55
- start_time = time.time()
56
- result = func(*args, **kwargs)
57
- elapsed_time = time.time() - start_time
58
- if __VTIMING__:
59
- print(f'Elapsed time for {func.__name__}: {elapsed_time:.4f} seconds')
60
- else:
61
- # Only print if __VTIMING__ is enabled
62
- pass
63
- return result
64
- return wrapper
65
-
66
- def print_memory() -> None:
67
- """Print current RAM memory usage percentage."""
68
- print(f'RAM memory usage: {psutil.virtual_memory().percent}%')
69
-
70
- def flush_mem(func: Callable) -> Callable:
71
- """Decorator for memory-efficient operations with monitoring.
72
-
73
- Args:
74
- func: The function to wrap.
75
-
76
- Returns:
77
- The wrapped function with memory monitoring and cleanup.
78
- """
79
- @wraps(func)
80
- def wrapper(*args, **kwargs):
81
- # Monitor memory before
82
- initial_memory = psutil.virtual_memory().percent
83
-
84
- # Execute function
85
- result = func(*args, **kwargs)
86
-
87
- # Force garbage collection
88
- gc.collect()
89
-
90
- # Monitor memory after
91
- final_memory = psutil.virtual_memory().percent
92
-
93
- # Print memory change if verbose
94
- if hasattr(args[0], '_verbose') and args[0]._verbose:
95
- print(f'Memory usage: {initial_memory:.1f}% -> {final_memory:.1f}% '
96
- f'(Δ{final_memory - initial_memory:+.1f}%)')
97
-
98
- return result
99
- return wrapper
100
-
101
- def cleanup_variables(*variables: Any) -> None:
102
- """Explicitly delete variables and run garbage collection.
103
-
104
- Args:
105
- *variables: Variables to delete.
106
- """
107
- for var in variables:
108
- del var
109
- gc.collect()
110
-
111
- def initialize_params(
112
- device: Optional[torch.device] = None,
113
- slant_range_vec: Optional[np.ndarray] = None,
114
- D: Optional[np.ndarray] = None,
115
- c: Optional[float] = None,
116
- len_range_line: Optional[int] = None,
117
- range_sample_freq: Optional[float] = None,
118
- wavelength: Optional[float] = None
119
- ) -> Dict[str, Any]:
120
- """Initialize processing parameters dictionary.
121
-
122
- Args:
123
- device: PyTorch device for computation.
124
- slant_range_vec: Slant range vector.
125
- D: Cosine of instantaneous squint angle.
126
- c: Speed of light.
127
- len_range_line: Length of range line.
128
- range_sample_freq: Range sampling frequency.
129
- wavelength: Radar wavelength.
130
-
131
- Returns:
132
- Dictionary containing all parameters.
133
- """
134
- return {key: value for key, value in locals().items()}
135
-
136
- def multiply_inplace(
137
- a: Union[np.ndarray, torch.Tensor],
138
- b: Union[np.ndarray, torch.Tensor]
139
- ) -> Union[np.ndarray, torch.Tensor]:
140
- """Multiply two arrays element-wise in-place with broadcasting support.
141
-
142
- Args:
143
- a: First array (modified in-place).
144
- b: Second array.
145
-
146
- Returns:
147
- Reference to modified first array.
148
-
149
- Raises:
150
- ValueError: If arrays have incompatible shapes for broadcasting.
151
- """
152
- if hasattr(a, 'shape') and hasattr(b, 'shape'):
153
- # Check if shapes are compatible for broadcasting
154
- if a.shape != b.shape and b.size != 1 and a.size != 1:
155
- # For 2D array * 1D array, the 1D array should match one of the 2D dimensions
156
- if len(a.shape) == 2 and len(b.shape) == 1:
157
- if b.shape[0] == a.shape[1]:
158
- # Broadcasting along range dimension - use numpy broadcasting
159
- pass # NumPy will handle this automatically
160
- elif b.shape[0] == a.shape[0]:
161
- # Need to reshape for azimuth dimension broadcasting
162
- b = b.reshape(-1, 1)
163
- else:
164
- raise ValueError(f'1D array length ({b.shape[0]}) does not match either dimension of 2D array {a.shape}')
165
-
166
- # Perform in-place multiplication
167
- try:
168
- if isinstance(a, np.ndarray):
169
- np.multiply(a, b, out=a)
170
- else: # torch tensor
171
- a.mul_(b)
172
- return a
173
- except (ValueError, RuntimeError) as e:
174
- raise ValueError(f'Arrays have incompatible shapes for in-place broadcasting: {a.shape} and {b.shape}. '
175
- f'Original error: {str(e)}') from e
176
-
177
- def multiply(
178
- a: Union[np.ndarray, torch.Tensor],
179
- b: Union[np.ndarray, torch.Tensor],
180
- debug: bool = False,
181
- ) -> Union[np.ndarray, torch.Tensor]:
182
- """Multiply two arrays element-wise with broadcasting support.
183
-
184
- Args:
185
- a: First array.
186
- b: Second array.
187
-
188
- Returns:
189
- Element-wise multiplication result.
190
-
191
- Raises:
192
- ValueError: If arrays have incompatible shapes for broadcasting.
193
- """
194
- if hasattr(a, 'shape') and hasattr(b, 'shape'):
195
- # Check if shapes are compatible for broadcasting
196
- if a.shape != b.shape and b.size != 1 and a.size != 1:
197
- # Try to understand the broadcasting scenario
198
- if debug:
199
- print(f'Debug: Attempting to multiply arrays with shapes {a.shape} and {b.shape}')
200
-
201
- # For 2D array * 1D array, the 1D array should match one of the 2D dimensions
202
- if len(a.shape) == 2 and len(b.shape) == 1:
203
- if debug:
204
- if b.shape[0] == a.shape[1]:
205
- print(f'Debug: Broadcasting 1D array along range dimension (axis=1)')
206
- elif b.shape[0] == a.shape[0]:
207
- print(f'Debug: Need to reshape 1D array for azimuth dimension (axis=0)')
208
- b = b.reshape(-1, 1) # Reshape for broadcasting along azimuth
209
- else:
210
- raise ValueError(f'1D array length ({b.shape[0]}) does not match either dimension of 2D array {a.shape}')
211
-
212
- # Allow broadcasting for compatible shapes
213
- try:
214
- result = a * b
215
- if debug:
216
- print(f'Debug: Broadcasting successful, result shape: {result.shape}')
217
- return result
218
- except (ValueError, RuntimeError) as e:
219
- print(f'Debug: Broadcasting failed with error: {str(e)}')
220
- raise ValueError(f'Arrays have incompatible shapes for broadcasting: {a.shape} and {b.shape}. '
221
- f'Original error: {str(e)}') from e
222
-
223
- return a * b
224
-
225
- @flush_mem
226
- @timing_decorator
227
- def ifft2d(radar_data: Union[np.ndarray, torch.Tensor], backend: str = 'numpy', verbose: bool = False) -> Union[np.ndarray, torch.Tensor]:
228
- """Perform memory-efficient 2D inverse FFT on radar data.
229
-
230
- Args:
231
- radar_data: Input radar data array.
232
- backend: Backend to use ('numpy' or 'torch').
233
- verbose: Whether to print verbose output.
234
-
235
- Returns:
236
- Processed radar data after 2D inverse FFT.
237
-
238
- Raises:
239
- ValueError: If backend is not supported.
240
- """
241
- if verbose:
242
- print('Performing 2D inverse FFT...')
243
-
244
- # Inverse FFT along azimuth dimension first
245
- if backend == 'numpy':
246
- radar_data = np.fft.ifft(radar_data, axis=0)
247
- elif backend == 'torch':
248
- radar_data = torch.fft.ifft(radar_data, dim=0)
249
- else:
250
- raise ValueError(f'Unsupported backend: {backend}')
251
-
252
- # Then inverse FFT along range dimension
253
- if backend == 'numpy':
254
- radar_data = np.fft.ifftshift(np.fft.ifft(radar_data, axis=1), axes=1)
255
- elif backend == 'torch':
256
- radar_data = torch.fft.ifft(radar_data, dim=1)
257
- radar_data = torch.fft.ifftshift(radar_data, dim=1)
258
- else:
259
- raise ValueError(f'Unsupported backend: {backend}')
260
-
261
- if verbose:
262
- print(f'2D inverse FFT completed, data shape: {radar_data.shape}')
263
- print_memory()
264
-
265
- return radar_data
266
-
267
- @flush_mem
268
- @timing_decorator
269
- def iff_azimuth(
270
- radar_data: Union[np.ndarray, torch.Tensor],
271
- backend: str = 'numpy',
272
- verbose: bool = False
273
- ) -> Union[np.ndarray, torch.Tensor]:
274
- """Perform memory-efficient inverse FFT along azimuth dimension.
275
-
276
- Args:
277
- radar_data: Input radar data array.
278
- backend: Backend to use ('numpy' or 'torch').
279
- verbose: Whether to print verbose output.
280
-
281
- Returns:
282
- Processed radar data after inverse FFT along azimuth dimension.
283
-
284
- Raises:
285
- ValueError: If backend is not supported.
286
- """
287
- if verbose:
288
- print('Performing inverse FFT along azimuth dimension...')
289
-
290
- if backend == 'numpy':
291
- radar_data = np.fft.ifft(radar_data, axis=0)
292
- elif backend == 'torch':
293
- radar_data = torch.fft.ifft(radar_data, dim=0)
294
- else:
295
- raise ValueError(f'Unsupported backend: {backend}')
296
-
297
- if verbose:
298
- print(f'Inverse FFT along azimuth completed, data shape: {radar_data.shape}')
299
- print_memory()
300
-
301
- return radar_data
302
-
303
- # -------- Processing Class ----------
304
- class CoarseRDA:
305
- """Memory-efficient Coarse Range Doppler Algorithm processor for SAR data.
306
-
307
- This class implements a memory-optimized coarse Range Doppler Algorithm for processing
308
- synthetic aperture radar (SAR) data, specifically designed for Sentinel-1 data.
309
-
310
- The processing pipeline follows these main steps:
311
- 1. Initialization and data loading
312
- 2. Transmission replica generation
313
- 3. 2D FFT transformation
314
- 4. Range compression
315
- 5. Range Cell Migration Correction (RCMC)
316
- 6. Azimuth compression
317
- 7. Final inverse transformations
318
-
319
- Memory optimizations include:
320
- - In-place operations where possible
321
- - Explicit memory cleanup
322
- - Memory usage monitoring
323
- - Efficient data type usage
324
- """
325
-
326
- # ==================== INITIALIZATION METHODS ====================
327
-
328
- def __init__(
329
- self,
330
- raw_data: Dict[str, Any],
331
- verbose: bool = False,
332
- backend: str = 'numpy',
333
- memory_efficient: bool = True
334
- ) -> None:
335
- """Initialize the CoarseRDA processor.
336
-
337
- Args:
338
- raw_data: Dictionary containing 'echo', 'ephemeris', and 'metadata'.
339
- verbose: Whether to print verbose output.
340
- backend: Backend to use ('numpy', 'torch', or 'custom').
341
- memory_efficient: Whether to enable memory-efficient mode.
342
-
343
- Raises:
344
- ValueError: If invalid backend is specified.
345
- AssertionError: If required data is missing.
346
- """
347
- self._validate_inputs(raw_data, backend)
348
- self._initialize_settings(verbose, backend, memory_efficient)
349
- self._load_data(raw_data)
350
- self._setup_backend()
351
- self._initialize_transmission_replica()
352
-
353
- def _validate_inputs(self, raw_data: Dict[str, Any], backend: str) -> None:
354
- """Validate input parameters.
355
-
356
- Args:
357
- raw_data: Dictionary containing radar data.
358
- backend: Processing backend.
359
-
360
- Raises:
361
- AssertionError: If required data is missing.
362
- ValueError: If invalid backend is specified.
363
- """
364
- assert isinstance(raw_data, dict), 'raw_data must be a dictionary'
365
- assert 'echo' in raw_data, 'raw_data must contain "echo" key'
366
- assert 'ephemeris' in raw_data, 'raw_data must contain "ephemeris" key'
367
- assert 'metadata' in raw_data, 'raw_data must contain "metadata" key'
368
-
369
- valid_backends = {'numpy', 'torch', 'custom'}
370
- if backend not in valid_backends:
371
- raise ValueError(f'Backend must be one of {valid_backends}, got {backend}')
372
-
373
- def _initialize_settings(self, verbose: bool, backend: str, memory_efficient: bool) -> None:
374
- """Initialize processor settings.
375
-
376
- Args:
377
- verbose: Whether to print verbose output.
378
- backend: Processing backend.
379
- memory_efficient: Whether to enable memory-efficient mode.
380
- """
381
- self._backend = backend
382
- self._verbose = verbose
383
- self._memory_efficient = memory_efficient
384
-
385
- if self._verbose:
386
- print(f'Memory efficient mode: {"enabled" if memory_efficient else "disabled"}')
387
-
388
- def _load_data(self, raw_data: Dict[str, Any]) -> None:
389
- """Load and preprocess input data with memory optimization.
390
-
391
- Args:
392
- raw_data: Dictionary containing radar data.
393
- """
394
- # Use views instead of copies where possible
395
- self.radar_data = raw_data['echo']
396
-
397
- # Create a copy for ephemeris as we need to modify it
398
- self.ephemeris = raw_data['ephemeris'].copy()
399
- self.ephemeris['time_stamp'] /= 2**24
400
-
401
- # Use reference for metadata as we don't modify it
402
- self.metadata = raw_data['metadata']
403
-
404
- # Initialize dimensions - these should remain constant throughout processing
405
- self.len_az_line, self.len_range_line = self.radar_data.shape
406
-
407
- if self._verbose:
408
- print(f'Loaded radar data with shape: {self.radar_data.shape}')
409
- print(f'Azimuth lines: {self.len_az_line}, Range lines: {self.len_range_line}')
410
- print(f'Data type: {self.radar_data.dtype}')
411
- print(f'Memory usage: {self.radar_data.nbytes / 1024**3:.2f} GB')
412
-
413
- def _setup_backend(self) -> None:
414
- """Set up processing backend and device configuration."""
415
- if self._backend == 'torch':
416
- if torch is None:
417
- raise ImportError('PyTorch is required for torch backend but not available')
418
- self.device = getattr(
419
- self.radar_data,
420
- 'device',
421
- torch.device('cuda' if torch.cuda.is_available() else 'cpu')
422
- )
423
- if self._verbose:
424
- print(f'Selected device: {self.device}')
425
-
426
- def _initialize_transmission_replica(self) -> None:
427
- """Initialize transmission replica during setup."""
428
- self._generate_tx_replica()
429
-
430
- # ==================== TRANSMISSION REPLICA METHODS ====================
431
-
432
- @timing_decorator
433
- def _generate_tx_replica(self) -> None:
434
- """Generate transmission replica based on metadata parameters.
435
-
436
- This method creates the transmission replica used for range compression
437
- based on the radar system parameters extracted from metadata.
438
- """
439
- if self._verbose:
440
- print('Generating transmission replica...')
441
-
442
- # Extract range decimation and calculate sample frequency
443
- rgdec = int(self.metadata['range_decimation'].unique()[0])
444
- if self._verbose:
445
- print(f'Range decimation code: {rgdec}')
446
-
447
- self.range_sample_freq = range_dec_to_sample_rate(rgdec)
448
- if self._verbose:
449
- print(f'Range sample frequency: {self.range_sample_freq:.2f} Hz')
450
-
451
- # Extract transmission parameters
452
- tx_params = self._extract_tx_parameters()
453
-
454
- # Generate replica signal
455
- self._create_replica_signal(tx_params)
456
-
457
- if self._verbose:
458
- print(f'Replica length: {self.replica_len}')
459
- print('Transmission replica generated successfully!')
460
-
461
- def _extract_tx_parameters(self) -> Dict[str, float]:
462
- """Extract transmission parameters from metadata.
463
-
464
- Returns:
465
- Dictionary containing transmission parameters.
466
- """
467
- txpsf = self.metadata['tx_pulse_start_freq'].unique()[0]
468
- txprr = self.metadata['tx_ramp_rate'].unique()[0]
469
- txpl = self.metadata['tx_pulse_length'].unique()[0]
470
-
471
- if self._verbose:
472
- print(f'TX pulse start frequency: {txpsf:.2f} Hz')
473
- print(f'TX ramp rate: {txprr:.2f} Hz/s')
474
- print(f'TX pulse length: {txpl:.6f} s')
475
-
476
- return {'start_freq': txpsf, 'ramp_rate': txprr, 'pulse_length': txpl}
477
-
478
- def _create_replica_signal(self, tx_params: Dict[str, float]) -> None:
479
- """Create the replica signal from transmission parameters.
480
-
481
- Args:
482
- tx_params: Dictionary containing transmission parameters.
483
- """
484
- txpsf = tx_params['start_freq']
485
- txprr = tx_params['ramp_rate']
486
- txpl = tx_params['pulse_length']
487
-
488
- # Generate replica
489
- self.num_tx_vals = int(txpl * self.range_sample_freq)
490
- if self._verbose:
491
- print(f'Number of TX values: {self.num_tx_vals}')
492
-
493
- tx_replica_time_vals = np.linspace(-txpl/2, txpl/2, num=self.num_tx_vals)
494
- phi1 = txpsf + txprr * txpl / 2
495
- phi2 = txprr / 2
496
-
497
- if self._verbose:
498
- print(f'Phase parameters - phi1: {phi1:.2f}, phi2: {phi2:.2e}')
499
-
500
- self.tx_replica = np.exp(
501
- 2j * np.pi * (phi1 * tx_replica_time_vals + phi2 * tx_replica_time_vals**2)
502
- )
503
- self.replica_len = len(self.tx_replica)
504
-
505
- # ==================== FFT METHODS ====================
506
-
507
- @flush_mem
508
- @timing_decorator
509
- def fft2d(self, w_pad: Optional[int] = None, executors: int = 12) -> None:
510
- """Perform memory-efficient 2D FFT on radar data in range and azimuth dimensions.
511
-
512
- Args:
513
- w_pad: Width padding for range FFT (ignored for dimension preservation).
514
- executors: Number of executors for custom backend.
515
-
516
- Raises:
517
- ValueError: If backend is not supported.
518
- """
519
- if self._verbose:
520
- print(f'FFT input data shape: {self.radar_data.shape}')
521
- print_memory()
522
-
523
- if self._backend == 'numpy':
524
- self._fft2d_numpy_efficient()
525
- elif self._backend == 'custom':
526
- self._fft2d_custom(executors)
527
- elif self._backend == 'torch':
528
- self._fft2d_torch_efficient()
529
- else:
530
- raise ValueError(f'Backend {self._backend} not supported')
531
-
532
- # Verify dimensions are preserved
533
- expected_shape = (self.len_az_line, self.len_range_line)
534
- if self.radar_data.shape != expected_shape:
535
- raise RuntimeError(f'FFT changed radar data shape from {expected_shape} to {self.radar_data.shape}')
536
-
537
- if self._verbose:
538
- print(f'FFT output data shape: {self.radar_data.shape}')
539
- print('- FFT performed successfully!')
540
- print_memory()
541
-
542
- def _fft2d_numpy_efficient(self) -> None:
543
- """Perform memory-efficient 2D FFT using NumPy backend preserving original dimensions.
544
-
545
- Uses in-place operations and memory cleanup for better efficiency.
546
- """
547
- # Store original shape for verification
548
- original_shape = self.radar_data.shape
549
- if self._verbose:
550
- print(f'Original radar data shape: {original_shape}')
551
-
552
- # Ensure data is contiguous and maintain original precision
553
- if not self.radar_data.flags.c_contiguous:
554
- if self._verbose:
555
- print('Making data contiguous...')
556
- self.radar_data = np.ascontiguousarray(self.radar_data)
557
-
558
- # FFT each range line (axis=1) - EXACT SAME as original
559
- if self._verbose:
560
- print(f'Performing FFT along range dimension (axis=1)...')
561
-
562
- # Use same approach as original - no dtype changes
563
- self.radar_data = np.fft.fft(self.radar_data, axis=1)
564
-
565
- if self._verbose:
566
- print(f'First FFT along range dimension completed, shape: {self.radar_data.shape}')
567
- print_memory()
568
-
569
- # FFT each azimuth line (axis=0) with fftshift - EXACT SAME as original
570
- if self._verbose:
571
- print(f'Performing FFT along azimuth dimension (axis=0) with fftshift...')
572
-
573
- # Use same approach as original
574
- self.radar_data = np.fft.fftshift(np.fft.fft(self.radar_data, axis=0), axes=0)
575
-
576
- if self._verbose:
577
- print(f'Second FFT along azimuth dimension completed, shape: {self.radar_data.shape}')
578
- print_memory()
579
-
580
- # Verify shape preservation
581
- assert self.radar_data.shape == original_shape, \
582
- f'FFT changed shape from {original_shape} to {self.radar_data.shape}'
583
-
584
- def _fft2d_torch_efficient(self) -> None:
585
- """Perform memory-efficient 2D FFT using PyTorch backend preserving dimensions.
586
-
587
- Uses in-place operations where possible.
588
- """
589
- original_shape = self.radar_data.shape
590
-
591
- if self._verbose:
592
- print('Performing memory-efficient PyTorch FFT...')
593
- print_memory()
594
-
595
- # FFT each range line (axis=1) - in-place when possible
596
- if self._memory_efficient:
597
- temp = torch.fft.fft(self.radar_data, dim=1)
598
- self.radar_data.copy_(temp)
599
- del temp
600
- torch.cuda.empty_cache() if torch.cuda.is_available() else None
601
- else:
602
- self.radar_data = torch.fft.fft(self.radar_data, dim=1)
603
-
604
- # FFT each azimuth line (axis=0) with fftshift
605
- if self._memory_efficient:
606
- temp = torch.fft.fft(self.radar_data, dim=0)
607
- self.radar_data.copy_(temp)
608
- del temp
609
-
610
- temp = torch.fft.fftshift(self.radar_data, dim=0)
611
- self.radar_data.copy_(temp)
612
- del temp
613
- torch.cuda.empty_cache() if torch.cuda.is_available() else None
614
- else:
615
- self.radar_data = torch.fft.fftshift(
616
- torch.fft.fft(self.radar_data, dim=0),
617
- dim=0
618
- )
619
-
620
- # Verify shape preservation
621
- assert self.radar_data.shape == original_shape, \
622
- f'Torch FFT changed shape from {original_shape} to {self.radar_data.shape}'
623
-
624
- @flush_mem
625
- @timing_decorator
626
- def ifft_range(self) -> None:
627
- """Perform memory-efficient inverse FFT along range dimension."""
628
- if self._backend == 'numpy':
629
- # Use EXACT SAME approach as original
630
- self.radar_data = np.fft.ifftshift(np.fft.ifft(self.radar_data, axis=1), axes=1)
631
- elif self._backend == 'torch':
632
- self.radar_data = torch.fft.ifft(self.radar_data, dim=1)
633
- self.radar_data = torch.fft.ifftshift(self.radar_data, dim=1)
634
- else:
635
- raise ValueError(f'Unsupported backend: {self._backend}')
636
-
637
- @flush_mem
638
- @timing_decorator
639
- def ifft_azimuth(self) -> None:
640
- """Perform memory-efficient inverse FFT along azimuth dimension."""
641
- if self._backend == 'numpy':
642
- # Use EXACT SAME approach as original
643
- self.radar_data = np.fft.ifft(self.radar_data, axis=0)
644
- elif self._backend == 'torch':
645
- self.radar_data = torch.fft.ifft(self.radar_data, dim=0)
646
- else:
647
- raise ValueError(f'Unsupported backend: {self._backend}')
648
-
649
- # ==================== FILTER GENERATION METHODS ====================
650
-
651
- def _apply_window(self, signal: np.ndarray, window_type: str = 'hamming') -> np.ndarray:
652
- """Apply windowing function to signal.
653
-
654
- Args:
655
- signal: Input signal array.
656
- window_type: Type of window ('hamming', 'hanning', 'blackman', 'rectangular').
657
-
658
- Returns:
659
- Windowed signal array.
660
-
661
- Raises:
662
- ValueError: If window type is not supported.
663
- """
664
- if window_type.lower() == 'rectangular' or window_type is None:
665
- return signal
666
-
667
- signal_length = len(signal)
668
-
669
- if window_type.lower() == 'hamming':
670
- window = np.hamming(signal_length)
671
- elif window_type.lower() == 'hanning':
672
- window = np.hanning(signal_length)
673
- elif window_type.lower() == 'blackman':
674
- window = np.blackman(signal_length)
675
- else:
676
- raise ValueError(f'Unsupported window type: {window_type}. '
677
- f'Supported types: hamming, hanning, blackman, rectangular')
678
-
679
- return signal * window
680
-
681
- def _get_reference_function(self, replica_chirp: np.ndarray, window_type: str = 'hamming') -> np.ndarray:
682
- """Generate reference function from replica chirp following C++ implementation.
683
-
684
- Args:
685
- replica_chirp: Input replica chirp signal.
686
- window_type: Type of window to apply.
687
-
688
- Returns:
689
- Reference function for range compression.
690
- """
691
- # Apply windowing to replica chirp first (as in C++ implementation)
692
- windowed_replica = self._apply_window(replica_chirp, window_type)
693
-
694
- # Calculate energy normalization (matching C++ implementation)
695
- # C++ code: norm *= n / norm_size, then energy = accumulate(norm)
696
- norm = np.abs(windowed_replica)
697
- norm_size = len(norm)
698
- norm_squared = (norm * norm) / norm_size # This matches C++ norm *= n / norm_size
699
- energy = np.sum(norm_squared)
700
-
701
- if self._verbose:
702
- print(f'Calculated energy for normalization: {energy:.6f}')
703
-
704
- # Create reference function
705
- reference = windowed_replica.copy()
706
-
707
- # Apply FFT and conjugate (matching C++ implementation)
708
- reference = np.fft.fft(reference)
709
- reference = np.conjugate(reference)
710
-
711
- # Normalize by energy
712
- if energy > 0:
713
- reference = reference / energy
714
- else:
715
- if self._verbose:
716
- print('⚠️ Warning: Zero energy detected, skipping normalization')
717
-
718
- return reference
719
-
720
- @flush_mem
721
- @timing_decorator
722
- def get_range_filter(self, pad_w: int = 0, window_type: str = 'hamming') -> np.ndarray:
723
- """Compute memory-efficient range filter for radar data compression.
724
-
725
- Args:
726
- pad_w: Width padding (ignored - filter always matches radar data dimensions).
727
- window_type: Type of window to apply ('hamming', 'hanning', 'blackman', 'rectangular').
728
-
729
- Returns:
730
- Range filter array exactly matching radar data range dimension.
731
-
732
- Raises:
733
- AssertionError: If filter dimensions are invalid.
734
- ValueError: If window type is not supported.
735
- """
736
- # Use exact radar data dimensions - no padding considerations
737
- current_range_dim = self.radar_data.shape[1]
738
-
739
- if self._verbose:
740
- print(f'Creating range filter for radar data shape: {self.radar_data.shape}')
741
- print(f'Range dimension: {current_range_dim}')
742
- print(f'TX replica length: {self.num_tx_vals}')
743
- print(f'Window type: {window_type}')
744
-
745
- # Create range filter with exact radar data range dimension
746
- range_filter = np.zeros(current_range_dim, dtype=complex)
747
-
748
- # Place replica in center of filter
749
- if current_range_dim >= self.num_tx_vals:
750
- index_start = (current_range_dim - self.num_tx_vals) // 2
751
- index_end = index_start + self.num_tx_vals
752
-
753
- if self._verbose:
754
- print(f'Placing replica at indices [{index_start}:{index_end}] in filter of length {current_range_dim}')
755
-
756
- range_filter[index_start:index_end] = self.tx_replica
757
- else:
758
- # If range dimension is smaller than replica, truncate replica
759
- if self._verbose:
760
- print(f'⚠️ Range dimension ({current_range_dim}) < replica length ({self.num_tx_vals}), truncating replica')
761
-
762
- replica_start = (self.num_tx_vals - current_range_dim) // 2
763
- replica_end = replica_start + current_range_dim
764
- range_filter[:] = self.tx_replica[replica_start:replica_end]
765
-
766
- # Generate reference function using C++ approach - apply windowing to the placed replica
767
- range_filter = self._get_reference_function(range_filter, window_type)
768
-
769
- if self._verbose:
770
- print(f'Range filter shape: {range_filter.shape}')
771
- print(f'Range filter energy after normalization: {np.sum(np.abs(range_filter)**2):.6f}')
772
-
773
- # Ensure filter exactly matches radar data range dimension
774
- assert range_filter.shape[0] == current_range_dim, \
775
- f'Filter shape mismatch: expected {current_range_dim}, got {range_filter.shape[0]}'
776
-
777
- return range_filter
778
-
779
- @flush_mem
780
- @timing_decorator
781
- def get_rcmc(self) -> np.ndarray:
782
- """Calculate memory-efficient Range Cell Migration Correction filter.
783
-
784
- Returns:
785
- RCMC filter array matching radar data dimensions.
786
- """
787
- self._compute_effective_velocities()
788
-
789
- self.wavelength = cnst.TX_WAVELENGTH_M
790
-
791
- # Generate azimuth frequency values for the entire azimuth line length
792
- self.az_freq_vals = np.arange(
793
- -self.az_sample_freq/2,
794
- self.az_sample_freq/2,
795
- self.az_sample_freq/self.len_az_line
796
- )
797
-
798
- # Ensure we have exactly the right number of frequency values
799
- if len(self.az_freq_vals) != self.len_az_line:
800
- self.az_freq_vals = np.linspace(
801
- -self.az_sample_freq/2,
802
- self.az_sample_freq/2,
803
- self.len_az_line,
804
- endpoint=False
805
- )
806
-
807
- if self._verbose:
808
- print(f'Azimuth frequency values shape: {self.az_freq_vals.shape}')
809
- print(f'Effective velocities shape: {self.effective_velocities.shape}')
810
-
811
- # Take mean effective velocity across range for each azimuth line
812
- # This reduces from (56130, 25724) to (56130,)
813
- mean_effective_velocities = np.mean(self.effective_velocities, axis=1)
814
-
815
- if self._verbose:
816
- print(f'Mean effective velocities shape: {mean_effective_velocities.shape}')
817
-
818
- # Cosine of instantaneous squint angle
819
- # Broadcasting: (56130,) with (56130,) -> (56130,)
820
- self.D = np.sqrt(
821
- 1 - (self.wavelength**2 * self.az_freq_vals**2) /
822
- (4 * mean_effective_velocities**2)
823
- )
824
-
825
- if self._verbose:
826
- print(f'D (cosine squint angle) shape: {self.D.shape}')
827
-
828
- # Create RCMC filter with CURRENT radar data dimensions (should be original dimensions)
829
- current_range_dim = self.radar_data.shape[1]
830
-
831
- range_freq_vals = np.linspace(
832
- -self.range_sample_freq/2,
833
- self.range_sample_freq/2,
834
- num=current_range_dim
835
- )
836
-
837
- if self._verbose:
838
- print(f'Range frequency values shape: {range_freq_vals.shape}')
839
- print(f'Current radar data range dimension: {current_range_dim}')
840
- print(f'Slant range vec shape: {self.slant_range_vec.shape}')
841
-
842
- # Calculate RCMC shift - use first slant range value for reference
843
- rcmc_shift = self.slant_range_vec[0] * (1/self.D - 1)
844
-
845
- if self._verbose:
846
- print(f'RCMC shift shape: {rcmc_shift.shape}')
847
-
848
- # IMPORTANT: Use same calculation method as original - no chunked processing
849
- # Broadcasting for final filter calculation
850
- range_freq_2d = range_freq_vals[np.newaxis, :] # (1, current_range_dim)
851
- rcmc_shift_2d = rcmc_shift[:, np.newaxis] # (56130, 1)
852
-
853
- rcmc_filter = np.exp(4j * np.pi * range_freq_2d * rcmc_shift_2d / self.c)
854
-
855
- if self._verbose:
856
- print(f'Final RCMC filter shape: {rcmc_filter.shape}')
857
-
858
- return rcmc_filter
859
-
860
- @flush_mem
861
- @timing_decorator
862
- def get_azimuth_filter(self) -> np.ndarray:
863
- """Calculate memory-efficient azimuth compression filter.
864
-
865
- Returns:
866
- Azimuth filter array matching radar data dimensions.
867
- """
868
- if self._verbose:
869
- print(f'Computing azimuth filter...')
870
- print(f'Slant range vec shape: {self.slant_range_vec.shape}')
871
- print(f'D shape: {self.D.shape}')
872
- print(f'Wavelength: {self.wavelength}')
873
-
874
- # Use current radar data dimensions (should match original slant range vector)
875
- current_range_dim = self.radar_data.shape[1]
876
-
877
- # Ensure slant range vector matches radar data dimensions
878
- if current_range_dim != len(self.slant_range_vec):
879
- if self._verbose:
880
- print(f'⚠️ Warning: Current range dim ({current_range_dim}) != slant range vec length ({len(self.slant_range_vec)})')
881
- print(f'This should not happen - using original slant range vector')
882
- current_slant_range_vec = self.slant_range_vec
883
- else:
884
- current_slant_range_vec = self.slant_range_vec
885
-
886
- # IMPORTANT: Use same calculation method as original - no chunked processing
887
- # Broadcasting for azimuth filter calculation
888
- slant_range_2d = current_slant_range_vec[np.newaxis, :] # (1, range_dim)
889
- D_2d = self.D[:, np.newaxis] # (56130, 1)
890
-
891
- azimuth_filter = np.exp(4j * np.pi * slant_range_2d * D_2d / self.wavelength)
892
-
893
- if self._verbose:
894
- print(f'Azimuth filter shape: {azimuth_filter.shape}')
895
-
896
- return azimuth_filter
897
-
898
- # ==================== VELOCITY COMPUTATION METHODS ====================
899
-
900
- @timing_decorator
901
- def _compute_effective_velocities(self) -> None:
902
- """Calculate effective spacecraft velocities for processing.
903
-
904
- This method computes the effective velocities needed for RCMC and
905
- azimuth compression by combining spacecraft and ground velocities.
906
- """
907
- # Initialize timing and geometry parameters
908
- self._initialize_timing_parameters()
909
-
910
- # Calculate spacecraft velocities and positions
911
- space_velocities, positions = self._calculate_spacecraft_dynamics()
912
-
913
- # Compute effective velocities using Earth model
914
- self._compute_ground_velocities(space_velocities, positions)
915
-
916
- def _initialize_timing_parameters(self) -> None:
917
- """Initialize timing and geometry parameters for velocity computation.
918
-
919
- Raises:
920
- KeyError: If required metadata columns are missing.
921
- ValueError: If metadata values are invalid.
922
- """
923
- self.c = cnst.SPEED_OF_LIGHT_MPS
924
-
925
- # Check for required columns with case-insensitive matching
926
- metadata_columns = {col.lower(): col for col in self.metadata.columns}
927
-
928
- required_mappings = {
929
- 'pri': ['pri', 'pulse_repetition_interval'],
930
- 'rank': ['rank'],
931
- 'swst': ['swst', 'sampling_window_start_time', 'start_time']
932
- }
933
-
934
- column_map = {}
935
- for param, possible_names in required_mappings.items():
936
- found_column = None
937
- for name in possible_names:
938
- if name.lower() in metadata_columns:
939
- found_column = metadata_columns[name.lower()]
940
- break
941
-
942
- if found_column is None:
943
- available_cols = list(self.metadata.columns)
944
- raise KeyError(
945
- f'Could not find column for {param}. Tried: {possible_names}. '
946
- f'Available columns: {available_cols}'
947
- )
948
- column_map[param] = found_column
949
-
950
- if self._verbose:
951
- print('Column mapping:')
952
- for param, col in column_map.items():
953
- print(f' {param} -> {col}')
954
-
955
- # Extract parameters with error handling
956
- try:
957
- self.pri = self.metadata[column_map['pri']].iloc[0]
958
- rank = self.metadata[column_map['rank']].iloc[0]
959
- range_start_time_base = self.metadata[column_map['swst']].iloc[0]
960
- except (IndexError, TypeError) as e:
961
- raise ValueError(f'Error extracting metadata values: {str(e)}') from e
962
-
963
- # Validate values
964
- assert self.pri > 0, f'PRI must be positive, got {self.pri}'
965
- assert rank >= 0, f'Rank must be non-negative, got {rank}'
966
-
967
- if self._verbose:
968
- print(f'PRI: {self.pri:.6f} s')
969
- print(f'Rank: {rank}')
970
- print(f'Base range start time: {range_start_time_base:.6f} s')
971
-
972
- # Calculate derived parameters
973
- suppressed_data_time = 320 / (8 * cnst.F_REF)
974
- range_start_time = range_start_time_base + suppressed_data_time
975
-
976
- # Sample rates
977
- range_sample_period = 1 / self.range_sample_freq
978
- self.az_sample_freq = 1 / self.pri
979
-
980
- if self._verbose:
981
- print(f'Range start time: {range_start_time:.6f} s')
982
- print(f'Azimuth sample frequency: {self.az_sample_freq:.2f} Hz')
983
-
984
- # Fast time and slant range vectors
985
- sample_num_along_range_line = np.arange(0, self.len_range_line, 1)
986
- fast_time_vec = range_start_time + (range_sample_period * sample_num_along_range_line)
987
- self.slant_range_vec = ((rank * self.pri) + fast_time_vec) * self.c / 2
988
-
989
- if self._verbose:
990
- print(f'Slant range vector shape: {self.slant_range_vec.shape}')
991
- print(f'Slant range min/max: {self.slant_range_vec.min():.2f}/{self.slant_range_vec.max():.2f} m')
992
-
993
- def _calculate_spacecraft_dynamics(self) -> Tuple[np.ndarray, np.ndarray]:
994
- """Calculate spacecraft velocities and positions.
995
-
996
- Returns:
997
- Tuple of (space_velocities, positions) arrays.
998
-
999
- Raises:
1000
- KeyError: If required columns are missing from ephemeris or metadata.
1001
- ValueError: If interpolation fails.
1002
- """
1003
- if self._verbose:
1004
- print('Calculating spacecraft dynamics...')
1005
- print(f'Ephemeris shape: {self.ephemeris.shape}')
1006
- print(f'Metadata shape: {self.metadata.shape}')
1007
-
1008
- # Spacecraft velocity calculations
1009
- ecef_vels = self.ephemeris.apply(
1010
- lambda x: math.sqrt(x['vx']**2 + x['vy']**2 + x['vz']**2),
1011
- axis=1
1012
- )
1013
-
1014
- if self._verbose:
1015
- print(f'ECEF velocities shape: {ecef_vels.shape}')
1016
- print(f'ECEF velocities range: {ecef_vels.min():.2f} - {ecef_vels.max():.2f} m/s')
1017
-
1018
- # Extract arrays and ensure they are proper numpy arrays
1019
- time_stamps = self.ephemeris['time_stamp'].values
1020
- velocity_values = ecef_vels.values
1021
- x_values = self.ephemeris['x'].values
1022
- y_values = self.ephemeris['y'].values
1023
- z_values = self.ephemeris['z'].values
1024
-
1025
- if self._verbose:
1026
- print(f'Time stamps shape: {time_stamps.shape}')
1027
- print(f'Time stamps range: {time_stamps.min():.6f} - {time_stamps.max():.6f}')
1028
- print(f' veocity values shape: {velocity_values.shape}')
1029
- print(f'Position arrays shapes: x={x_values.shape}, y={y_values.shape}, z={z_values.shape}')
1030
-
1031
- # Ensure arrays are sorted by time for interpolation
1032
- sort_indices = np.argsort(time_stamps)
1033
- time_stamps = time_stamps[sort_indices]
1034
- velocity_values = velocity_values[sort_indices]
1035
- x_values = x_values[sort_indices]
1036
- y_values = y_values[sort_indices]
1037
- z_values = z_values[sort_indices]
1038
-
1039
- # Calculate metadata time stamps
1040
- metadata_times = self.metadata.apply(
1041
- lambda x: x['coarse_time'] + x['fine_time'],
1042
- axis=1
1043
- ).values
1044
-
1045
- if self._verbose:
1046
- print(f'Metadata times shape: {metadata_times.shape}')
1047
- print(f'Metadata times range: {metadata_times.min():.6f} - {metadata_times.max():.6f}')
1048
- print(f'Ephemeris time range: {time_stamps.min():.6f} - {time_stamps.max():.6f}')
1049
-
1050
- # Check if metadata times are within ephemeris time range
1051
- time_within_range = (metadata_times >= time_stamps.min()) & (metadata_times <= time_stamps.max())
1052
- if not np.all(time_within_range):
1053
- out_of_range_count = np.sum(~time_within_range)
1054
- if self._verbose:
1055
- print(f'⚠️ Warning: {out_of_range_count} metadata times are outside ephemeris range')
1056
- print(f' Will use boundary values for extrapolation')
1057
-
1058
- # Create interpolation functions with bounds_error=False and fill_value for extrapolation
1059
- try:
1060
- velocity_interp = interp1d(
1061
- time_stamps, velocity_values,
1062
- kind='linear',
1063
- bounds_error=False,
1064
- fill_value=(velocity_values[0], velocity_values[-1])
1065
- )
1066
- x_interp = interp1d(
1067
- time_stamps, x_values,
1068
- kind='linear',
1069
- bounds_error=False,
1070
- fill_value=(x_values[0], x_values[-1])
1071
- )
1072
- y_interp = interp1d(
1073
- time_stamps, y_values,
1074
- kind='linear',
1075
- bounds_error=False,
1076
- fill_value=(y_values[0], y_values[-1])
1077
- )
1078
- z_interp = interp1d(
1079
- time_stamps, z_values,
1080
- kind='linear',
1081
- bounds_error=False,
1082
- fill_value=(z_values[0], z_values[-1])
1083
- )
1084
- except ValueError as e:
1085
- raise ValueError(f'Failed to create interpolation functions: {str(e)}') from e
1086
-
1087
- # Interpolate at metadata time points
1088
- try:
1089
- space_velocities = velocity_interp(metadata_times)
1090
- x_interp_vals = x_interp(metadata_times)
1091
- y_interp_vals = y_interp(metadata_times)
1092
- z_interp_vals = z_interp(metadata_times)
1093
- except Exception as e:
1094
- raise ValueError(f'Interpolation failed: {str(e)}') from e
1095
-
1096
- # Ensure interpolated values are arrays and handle any remaining NaN values
1097
- space_velocities = np.asarray(space_velocities)
1098
- x_interp_vals = np.asarray(x_interp_vals)
1099
- y_interp_vals = np.asarray(y_interp_vals)
1100
- z_interp_vals = np.asarray(z_interp_vals)
1101
-
1102
- # Check for and handle NaN values
1103
- if np.any(np.isnan(space_velocities)):
1104
- nan_count = np.sum(np.isnan(space_velocities))
1105
- if self._verbose:
1106
- print(f'⚠️ Found {nan_count} NaN values in space_velocities, filling with nearest valid values')
1107
-
1108
- # Fill NaN values with nearest valid values
1109
- valid_mask = ~np.isnan(space_velocities)
1110
- if np.any(valid_mask):
1111
- # Forward fill
1112
- space_velocities = pd.Series(space_velocities).fillna(method='ffill').fillna(method='bfill').values
1113
- else:
1114
- # If all NaN, use average ephemeris velocity
1115
- space_velocities.fill(np.nanmean(velocity_values))
1116
-
1117
- # Handle NaN values in position components
1118
- for vals, name in [(x_interp_vals, 'x'), (y_interp_vals, 'y'), (z_interp_vals, 'z')]:
1119
- if np.any(np.isnan(vals)):
1120
- nan_count = np.sum(np.isnan(vals))
1121
- if self._verbose:
1122
- print(f'⚠️ Found {nan_count} NaN values in {name}_interp_vals, filling with nearest valid values')
1123
-
1124
- valid_mask = ~np.isnan(vals)
1125
- if np.any(valid_mask):
1126
- vals_series = pd.Series(vals).fillna(method='ffill').fillna(method='bfill')
1127
- if name == 'x':
1128
- x_interp_vals = vals_series.values
1129
- elif name == 'y':
1130
- y_interp_vals = vals_series.values
1131
- else:
1132
- z_interp_vals = vals_series.values
1133
-
1134
- if self._verbose:
1135
- print(f'Interpolated space_velocities shape: {space_velocities.shape}')
1136
- print(f'Interpolated position component shapes: x={x_interp_vals.shape}, y={y_interp_vals.shape}, z={z_interp_vals.shape}')
1137
-
1138
- # Create position array
1139
- positions = np.column_stack([x_interp_vals, y_interp_vals, z_interp_vals])
1140
-
1141
- if self._verbose:
1142
- print(f'Final space_velocities shape: {space_velocities.shape}')
1143
- print(f'Final positions shape: {positions.shape}')
1144
- print(f'Space velocities range: {space_velocities.min():.2f} - {space_velocities.max():.2f} m/s')
1145
- print(f'Position range - x: {positions[:, 0].min():.0f} to {positions[:, 0].max():.0f}')
1146
- print(f'Position range - y: {positions[:, 1].min():.0f} to {positions[:, 1].max():.0f}')
1147
- print(f'Position range - z: {positions[:, 2].min():.0f} to {positions[:, 2].max():.0f}')
1148
-
1149
- # Validate outputs
1150
- assert isinstance(space_velocities, np.ndarray), 'space_velocities must be numpy array'
1151
- assert isinstance(positions, np.ndarray), 'positions must be numpy array'
1152
- assert len(space_velocities.shape) == 1, f'space_velocities must be 1D, got shape {space_velocities.shape}'
1153
- assert len(positions.shape) == 2, f'positions must be 2D array, got shape {positions.shape}'
1154
- assert positions.shape[1] == 3, f'positions must have 3 columns (x,y,z), got {positions.shape[1]}'
1155
- assert space_velocities.shape[0] == positions.shape[0], f'velocity and position arrays must have same length'
1156
-
1157
- # Final check for NaN values after cleaning
1158
- assert not np.any(np.isnan(space_velocities)), 'NaN values still present in space_velocities after cleaning'
1159
- assert not np.any(np.isnan(positions)), 'NaN values still present in positions after cleaning'
1160
-
1161
- # Check for reasonable values
1162
- assert np.all(space_velocities > 1000), f'Space velocities too low (min: {space_velocities.min():.2f} m/s)'
1163
- assert np.all(space_velocities < 20000), f'Space velocities too high (max: {space_velocities.max():.2f} m/s)'
1164
-
1165
- position_magnitudes = np.linalg.norm(positions, axis=1)
1166
- assert np.all(position_magnitudes > 6e6), f'Position magnitudes too small (min: {position_magnitudes.min():.0f} m)'
1167
- assert np.all(position_magnitudes < 8e6), f'Position magnitudes too large (max: {position_magnitudes.max():.0f} m)'
1168
-
1169
- return space_velocities, positions
1170
-
1171
    def _compute_ground_velocities(self, space_velocities: np.ndarray, positions: np.ndarray) -> None:
        """Compute ground velocities and effective velocities.

        Combines spacecraft speed with an ellipsoidal Earth model (WGS84) to
        produce ``self.effective_velocities`` with shape
        (len(space_velocities), len(self.slant_range_vec)), the geometric mean
        of platform and ground velocity per (azimuth, range) cell.

        Args:
            space_velocities: Spacecraft velocity magnitudes (1D array).
            positions: Spacecraft position vectors (2D array, shape [N, 3]).

        Raises:
            AssertionError: If input arrays have incompatible shapes.
            ValueError: If calculations produce invalid results.
        """
        # Enhanced input validation
        assert isinstance(space_velocities, np.ndarray), f'space_velocities must be numpy array, got {type(space_velocities)}'
        assert isinstance(positions, np.ndarray), f'positions must be numpy array, got {type(positions)}'
        assert len(positions.shape) == 2, f'positions must be 2D, got shape {positions.shape}'
        assert positions.shape[1] == 3, f'positions must have 3 columns, got {positions.shape[1]}'
        assert space_velocities.shape[0] == positions.shape[0], f'Array lengths must match: velocities={space_velocities.shape[0]}, positions={positions.shape[0]}'

        # Ensure arrays are proper numpy arrays with correct dtypes
        space_velocities = np.asarray(space_velocities, dtype=np.float64)
        positions = np.asarray(positions, dtype=np.float64)

        # Check for NaN/inf values
        assert not np.any(np.isnan(space_velocities)), 'NaN values in space_velocities'
        assert not np.any(np.isnan(positions)), 'NaN values in positions'
        assert not np.any(np.isinf(space_velocities)), 'Infinite values in space_velocities'
        assert not np.any(np.isinf(positions)), 'Infinite values in positions'

        if self._verbose:
            print('Computing ground velocities...')
            print(f'Space velocities shape: {space_velocities.shape}')
            print(f'Positions shape: {positions.shape}')
            print(f'Slant range vec shape: {self.slant_range_vec.shape}')
            print(f'Input data ranges:')
            print(f'  Space velocities: {space_velocities.min():.2f} - {space_velocities.max():.2f} m/s')
            print(f'  Positions X: {positions[:, 0].min():.0f} - {positions[:, 0].max():.0f} m')
            print(f'  Positions Y: {positions[:, 1].min():.0f} - {positions[:, 1].max():.0f} m')
            print(f'  Positions Z: {positions[:, 2].min():.0f} - {positions[:, 2].max():.0f} m')

        # Earth model calculations — WGS84 semi-major / semi-minor axes
        a = float(cnst.WGS84_SEMI_MAJOR_AXIS_M)
        b = float(cnst.WGS84_SEMI_MINOR_AXIS_M)

        if self._verbose:
            print(f'Earth model parameters: a={a:.0f} m, b={b:.0f} m')

        # Spacecraft distance from Earth centre (magnitude of each position).
        # NOTE: despite the name "heights", this is the geocentric radius,
        # not altitude above the ellipsoid.
        H = np.linalg.norm(positions, axis=1)  # axis=1 for row-wise norm
        H = np.asarray(H, dtype=np.float64)

        # Validate H calculation
        assert H.shape == space_velocities.shape, f'H shape {H.shape} must match velocities shape {space_velocities.shape}'
        assert not np.any(np.isnan(H)), 'NaN values in H (spacecraft heights)'
        assert np.all(H > 0), 'All spacecraft heights must be positive'

        # Angular velocity of the platform about Earth centre (rad/s)
        W = space_velocities / H
        W = np.asarray(W, dtype=np.float64)

        # Geocentric latitude via arctan2 (robust near the poles / equator)
        xy_distance = np.sqrt(positions[:, 0]**2 + positions[:, 1]**2)
        xy_distance = np.asarray(xy_distance, dtype=np.float64)
        lat = np.arctan2(positions[:, 2], xy_distance)
        lat = np.asarray(lat, dtype=np.float64)

        if self._verbose:
            print(f'H (heights) shape: {H.shape}, range: {H.min():.0f} - {H.max():.0f} m')
            print(f'W (angular velocities) shape: {W.shape}, range: {W.min():.6f} - {W.max():.6f} rad/s')
            print(f'Latitudes range: {np.degrees(lat.min()):.2f} - {np.degrees(lat.max()):.2f} deg')

        # Validate intermediate calculations
        assert not np.any(np.isnan(W)), 'NaN values in W (angular velocities)'
        assert not np.any(np.isnan(lat)), 'NaN values in latitude'

        # Local Earth radius at each latitude on the ellipsoid:
        # R(lat) = sqrt((a^4 cos^2 + b^4 sin^2) / (a^2 cos^2 + b^2 sin^2))
        cos_lat = np.cos(lat)
        sin_lat = np.sin(lat)
        cos_lat = np.asarray(cos_lat, dtype=np.float64)
        sin_lat = np.asarray(sin_lat, dtype=np.float64)

        # Ensure all terms are numpy arrays before sqrt operation
        numerator = np.asarray(a**4 * cos_lat**2 + b**4 * sin_lat**2, dtype=np.float64)
        denominator = np.asarray(a**2 * cos_lat**2 + b**2 * sin_lat**2, dtype=np.float64)
        ratio = numerator / denominator
        ratio = np.asarray(ratio, dtype=np.float64)

        local_earth_rad = np.sqrt(ratio)
        local_earth_rad = np.asarray(local_earth_rad, dtype=np.float64)

        if self._verbose:
            print(f'Local Earth radius range: {local_earth_rad.min():.0f} - {local_earth_rad.max():.0f} m')

        # Validate local Earth radius
        assert not np.any(np.isnan(local_earth_rad)), 'NaN values in local_earth_rad'
        assert np.all(local_earth_rad > 0), 'All local Earth radii must be positive'

        # Ensure slant_range_vec is also a proper numpy array
        slant_range_vec = np.asarray(self.slant_range_vec, dtype=np.float64)

        # Broadcast 1D vectors into (azimuth, range)-compatible 2D shapes
        slant_range_2d = slant_range_vec[np.newaxis, :]      # Shape: [1, range_samples]
        local_earth_rad_2d = local_earth_rad[:, np.newaxis]  # Shape: [azimuth_samples, 1]
        H_2d = H[:, np.newaxis]                              # Shape: [azimuth_samples, 1]
        W_2d = W[:, np.newaxis]                              # Shape: [azimuth_samples, 1]

        # Law of cosines in the (Earth centre, satellite, target) triangle:
        # cos(beta) = (R^2 + H^2 - r^2) / (2 R H), beta = Earth-centre angle
        term1 = np.asarray(local_earth_rad_2d**2, dtype=np.float64)
        term2 = np.asarray(H_2d**2, dtype=np.float64)
        term3 = np.asarray(slant_range_2d**2, dtype=np.float64)
        term4 = np.asarray(2 * local_earth_rad_2d * H_2d, dtype=np.float64)

        cos_beta = (term1 + term2 - term3) / term4
        cos_beta = np.asarray(cos_beta, dtype=np.float64)

        # Clip to valid range for cosine (guards against float round-off)
        cos_beta = np.clip(cos_beta, -1.0, 1.0)

        if self._verbose:
            print(f'cos_beta shape: {cos_beta.shape}')
            print(f'cos_beta range: {cos_beta.min():.3f} - {cos_beta.max():.3f}')

        # Ground (beam footprint) velocity: v_g = R * W * cos(beta)
        ground_velocities = local_earth_rad_2d * W_2d * cos_beta
        ground_velocities = np.asarray(ground_velocities, dtype=np.float64)

        if self._verbose:
            print(f'Ground velocities shape: {ground_velocities.shape}')

        # Effective velocity: geometric mean sqrt(v_s * v_g)
        space_velocities_2d = space_velocities[:, np.newaxis]  # Shape: [azimuth_samples, 1]
        effective_vel_product = space_velocities_2d * ground_velocities
        effective_vel_product = np.asarray(effective_vel_product, dtype=np.float64)

        # Ensure non-negative values before sqrt
        effective_vel_product = np.maximum(effective_vel_product, 0.0)

        self.effective_velocities = np.sqrt(effective_vel_product)
        self.effective_velocities = np.asarray(self.effective_velocities, dtype=np.float64)

        if self._verbose:
            print(f'Effective velocities shape: {self.effective_velocities.shape}')
            print(f'Effective velocities range: {self.effective_velocities.min():.2f} - {self.effective_velocities.max():.2f} m/s')

        # Final validation
        assert not np.any(np.isnan(self.effective_velocities)), 'NaN values in effective velocities'
        assert not np.any(np.isinf(self.effective_velocities)), 'Infinite values in effective velocities'
        assert np.all(self.effective_velocities >= 0), 'All effective velocities must be non-negative'
1317
-
1318
-
1319
-
1320
- # ==================== MAIN PROCESSING METHODS ====================
1321
-
1322
    @flush_mem
    @timing_decorator
    def data_focus(self) -> None:
        """Perform memory-efficient complete SAR data focusing using Range Doppler Algorithm.

        Orchestrates the full chain: 2D FFT -> range compression -> RCMC ->
        azimuth compression. Intermediate images are snapshotted on self
        (``raw_data``, ``range_compressed_data``, ``rcmc_data``,
        ``azimuth_compressed_data``) for later inspection via _display_slice.

        Raises:
            RuntimeError: If data dimensions change unexpectedly during processing.
        """
        if self._verbose:
            print('Starting memory-efficient SAR data focusing...')
            print(f'Initial radar data shape: {self.radar_data.shape}')
            print_memory()

        # Store initial shape for verification
        initial_shape = self.radar_data.shape
        expected_shape = (self.len_az_line, self.len_range_line)

        assert initial_shape == expected_shape, \
            f'Initial data shape {initial_shape} does not match expected {expected_shape}'

        # NOTE(review): each deepcopy snapshot doubles peak memory for an
        # instant; kept because the plotting API reads these attributes.
        self.raw_data = copy.deepcopy(self.radar_data)
        if self._verbose:
            print(f'Raw radar data shape: {self.raw_data.shape}')
            print_memory()
        # ------------------------------------------------------------------------
        # Step 1: 2D FFT transformation (preserves dimensions)
        self.fft2d()
        assert self.radar_data.shape == initial_shape, \
            f'FFT changed data shape from {initial_shape} to {self.radar_data.shape}'
        # ------------------------------------------------------------------------

        # Step 2: Range compression; snapshot is inverse-transformed back to
        # the image domain for visualization.
        self.range_compression()
        self.range_compressed_data = ifft2d(copy.deepcopy(self.radar_data))
        if self._verbose:
            print(f'Range compressed data shape: {self.radar_data.shape}')
            print_memory()
        # ------------------------------------------------------------------------

        # Step 3: Range Cell Migration Correction
        self.rcmc()
        # NOTE(review): `iff_azimuth` looks like a typo for `ifft_azimuth`
        # (cf. `ifft2d` above and `self.ifft_azimuth` used elsewhere) — confirm
        # the name is actually defined in this module before relying on this
        # snapshot path.
        self.rcmc_data = iff_azimuth(copy.deepcopy(self.radar_data))
        if self._verbose:
            print(f'RCMC data shape: {self.radar_data.shape}')
            print_memory()
        # ------------------------------------------------------------------------
        # Step 4: Azimuth compression (final focused image; no copy — alias)
        self.azimuth_compression()
        self.azimuth_compressed_data = self.radar_data
        if self._verbose:
            print(f'SAR data focusing completed successfully!')
            print(f'Final radar data shape: {self.radar_data.shape}')
            print_memory()
1379
-
1380
- @flush_mem
1381
- @timing_decorator
1382
- def range_compression(self) -> None:
1383
- """Perform memory-efficient range compression step.
1384
-
1385
- This method applies the range compression filter to compress the radar
1386
- signal in the range dimension while preserving data dimensions.
1387
-
1388
- Raises:
1389
- RuntimeError: If data dimensions change unexpectedly during processing.
1390
- """
1391
- if self._verbose:
1392
- print('Starting range compression...')
1393
- print(f'Input radar data shape: {self.radar_data.shape}')
1394
- print_memory()
1395
-
1396
- # Store initial shape for verification
1397
- initial_shape = self.radar_data.shape
1398
-
1399
- # Legacy compatibility parameters
1400
- w_pad = 0
1401
- original_w = initial_shape[1]
1402
-
1403
- if self._verbose:
1404
- print(f'Processing with original_w={original_w}')
1405
-
1406
- # Perform range compression
1407
- self._perform_range_compression_efficient(w_pad, original_w)
1408
-
1409
- # Verify dimensions are preserved
1410
- assert self.radar_data.shape == initial_shape, \
1411
- f'Range compression changed data shape from {initial_shape} to {self.radar_data.shape}'
1412
-
1413
- if self._verbose:
1414
- print(f'Range compression completed successfully!')
1415
- print(f'Output radar data shape: {self.radar_data.shape}')
1416
- print_memory()
1417
-
1418
- @flush_mem
1419
- @timing_decorator
1420
- def rcmc(self) -> None:
1421
- """Perform memory-efficient Range Cell Migration Correction.
1422
-
1423
- This method applies the RCMC filter to correct for range cell migration
1424
- effects and performs inverse FFT in the range dimension.
1425
-
1426
- Raises:
1427
- RuntimeError: If data dimensions change unexpectedly during processing.
1428
- """
1429
- if self._verbose:
1430
- print('Starting Range Cell Migration Correction...')
1431
- print(f'Input radar data shape: {self.radar_data.shape}')
1432
- print_memory()
1433
-
1434
- # Store initial shape for verification
1435
- initial_shape = self.radar_data.shape
1436
-
1437
- # Perform RCMC
1438
- self._perform_rcmc_efficient()
1439
-
1440
- # Verify dimensions are preserved
1441
- assert self.radar_data.shape == initial_shape, \
1442
- f'RCMC changed data shape from {initial_shape} to {self.radar_data.shape}'
1443
-
1444
- if self._verbose:
1445
- print(f'RCMC completed successfully!')
1446
- print(f'Output radar data shape: {self.radar_data.shape}')
1447
- print_memory()
1448
-
1449
- @flush_mem
1450
- @timing_decorator
1451
- def azimuth_compression(self) -> None:
1452
- """Perform memory-efficient azimuth compression step.
1453
-
1454
- This method applies the azimuth compression filter to focus the radar
1455
- signal in the azimuth dimension and performs inverse FFT in azimuth.
1456
-
1457
- Raises:
1458
- RuntimeError: If data dimensions change unexpectedly during processing.
1459
- """
1460
- if self._verbose:
1461
- print('Starting azimuth compression...')
1462
- print(f'Input radar data shape: {self.radar_data.shape}')
1463
- print_memory()
1464
-
1465
- # Store initial shape for verification
1466
- initial_shape = self.radar_data.shape
1467
-
1468
- # Perform azimuth compression
1469
- self._perform_azimuth_compression_efficient()
1470
-
1471
- # Verify dimensions are preserved
1472
- assert self.radar_data.shape == initial_shape, \
1473
- f'Azimuth compression changed data shape from {initial_shape} to {self.radar_data.shape}'
1474
-
1475
- if self._verbose:
1476
- print(f'Azimuth compression completed successfully!')
1477
- print(f'Output radar data shape: {self.radar_data.shape}')
1478
- print_memory()
1479
-
1480
- def _perform_range_compression_efficient(self, w_pad: int, original_w: int) -> None:
1481
- """Perform memory-efficient range compression step while preserving data dimensions.
1482
-
1483
- Args:
1484
- w_pad: Width padding (ignored - dimensions preserved).
1485
- original_w: Original width (for verification).
1486
-
1487
- Raises:
1488
- ValueError: If array shapes are incompatible.
1489
- AssertionError: If dimensions change unexpectedly.
1490
- """
1491
- if self._verbose:
1492
- print(f'Starting memory-efficient range compression...')
1493
- print(f'Radar data shape: {self.radar_data.shape}')
1494
- print_memory()
1495
-
1496
- # Store original shape for verification
1497
- original_shape = self.radar_data.shape
1498
- expected_shape = (self.len_az_line, self.len_range_line)
1499
-
1500
- # Verify we still have expected dimensions
1501
- assert original_shape == expected_shape, \
1502
- f'Unexpected radar data shape: {original_shape}, expected: {expected_shape}'
1503
-
1504
- # Get range filter with matching dimensions
1505
- range_filter = self.get_range_filter()
1506
-
1507
- if self._verbose:
1508
- print(f'Range filter shape: {range_filter.shape}')
1509
- print(f'Applying range compression filter...')
1510
-
1511
- # Apply range compression filter - USE SAME METHOD AS ORIGINAL
1512
- self.radar_data = multiply(self.radar_data, range_filter)
1513
-
1514
- # Cleanup filter
1515
- cleanup_variables(range_filter)
1516
-
1517
- # Verify dimensions are preserved
1518
- assert self.radar_data.shape == original_shape, \
1519
- f'Range compression changed data shape from {original_shape} to {self.radar_data.shape}'
1520
-
1521
- if self._verbose:
1522
- print(f'Range compression completed. Data shape: {self.radar_data.shape}')
1523
- print_memory()
1524
-
1525
- def _perform_rcmc_efficient(self) -> None:
1526
- """Perform memory-efficient Range Cell Migration Correction."""
1527
- if self._verbose:
1528
- print('Starting memory-efficient RCMC...')
1529
- print_memory()
1530
-
1531
- rcmc_filter = self.get_rcmc()
1532
-
1533
- # Use SAME METHOD AS ORIGINAL
1534
- self.radar_data = multiply(self.radar_data, rcmc_filter)
1535
-
1536
- # Cleanup filter
1537
- cleanup_variables(rcmc_filter)
1538
-
1539
- # Inverse FFT in range
1540
- self.ifft_range()
1541
-
1542
- if self._verbose:
1543
- print('RCMC completed.')
1544
- print_memory()
1545
-
1546
- def _perform_azimuth_compression_efficient(self) -> None:
1547
- """Perform memory-efficient azimuth compression step.
1548
-
1549
- Raises:
1550
- ValueError: If array shapes are incompatible.
1551
- """
1552
- if self._verbose:
1553
- print('Starting memory-efficient azimuth compression...')
1554
- print(f'Radar data shape before azimuth filter: {self.radar_data.shape}')
1555
- print_memory()
1556
-
1557
- # Get azimuth filter
1558
- azimuth_filter = self.get_azimuth_filter()
1559
-
1560
- if self._verbose:
1561
- print(f'Azimuth filter shape: {azimuth_filter.shape}')
1562
-
1563
- # Apply azimuth compression - USE SAME METHOD AS ORIGINAL
1564
- self.radar_data = multiply(self.radar_data, azimuth_filter)
1565
-
1566
- # Cleanup filter
1567
- cleanup_variables(azimuth_filter)
1568
-
1569
- if self._verbose:
1570
- print(f'Radar data shape after azimuth compression: {self.radar_data.shape}')
1571
-
1572
- # Inverse FFT in azimuth
1573
- self.ifft_azimuth()
1574
-
1575
- if self._verbose:
1576
- print(f'Final radar data shape: {self.radar_data.shape}')
1577
- print_memory()
1578
-
1579
- # ==================== UTILITY METHODS ====================
1580
-
1581
-
1582
- @timing_decorator
1583
- def save_file(self, save_path: Union[str, Path]) -> None:
1584
- """Save processed radar data to file.
1585
-
1586
- Args:
1587
- save_path: Path where to save the data.
1588
- """
1589
- dump(self.radar_data, save_path)
1590
- if self._verbose:
1591
- print(f'Data saved to {save_path}')
1592
-
1593
-
1594
    # ==================== EEEZY ====================
    # Backward-compatibility alias: older callers used `_prompt_tx_replica`;
    # keep it pointing at the renamed implementation `_generate_tx_replica`
    # (defined earlier in this class, outside this chunk).
    _prompt_tx_replica = _generate_tx_replica
1597
-
1598
- # ==================== GRAPHIC METHODS ====================
1599
- def _display_slice(self, slice=(0, 4000, 0, 4000),
1600
- vmin=0, vmax=1000,
1601
- figsize=(12,12),
1602
- title = None,
1603
- step: str = 'az') -> None:
1604
- """Display a slice of the radar data for visualization."""
1605
-
1606
- assert step in ['raw', 'range_compressed', 'rcmc', 'az_compressed'], \
1607
- 'Invalid step. Choose from "raw", "range_compressed", "rcmc", or "az_compressed".'
1608
-
1609
- if step == 'raw':
1610
- data = self.raw_data
1611
- elif step == 'range_compressed':
1612
- data = self.range_compressed_data
1613
- elif step == 'rcmc':
1614
- data = self.rcmc_data
1615
- elif step == 'az_compressed':
1616
- data = self.radar_data
1617
- else:
1618
- raise ValueError(f'Invalid step: {step}. Choose from "raw", "range", "rcmc", or "az".')
1619
-
1620
- plt.figure(figsize=figsize)
1621
- plt.imshow(np.abs(data[slice[0]:slice[1], slice[2]:slice[3]]), vmin=vmin, vmax=vmax, cmap='viridis')
1622
- plt.axis('off')
1623
- if title:
1624
- plt.title(title)
1625
- plt.show()