sarpyx 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. docs/examples/advanced/batch_processing.py +1 -1
  2. docs/examples/advanced/custom_processing_chains.py +1 -1
  3. docs/examples/advanced/performance_optimization.py +1 -1
  4. docs/examples/basic/snap_integration.py +1 -1
  5. docs/examples/intermediate/quality_assessment.py +1 -1
  6. outputs/baseline/20260205-234828/__init__.py +33 -0
  7. outputs/baseline/20260205-234828/main.py +493 -0
  8. outputs/final/20260205-234851/__init__.py +33 -0
  9. outputs/final/20260205-234851/main.py +493 -0
  10. sarpyx/__init__.py +2 -2
  11. sarpyx/algorithms/__init__.py +2 -2
  12. sarpyx/cli/__init__.py +1 -1
  13. sarpyx/cli/focus.py +3 -5
  14. sarpyx/cli/main.py +106 -7
  15. sarpyx/cli/shipdet.py +1 -1
  16. sarpyx/cli/worldsar.py +549 -0
  17. sarpyx/processor/__init__.py +1 -1
  18. sarpyx/processor/core/decode.py +43 -8
  19. sarpyx/processor/core/focus.py +104 -57
  20. sarpyx/science/__init__.py +1 -1
  21. sarpyx/sla/__init__.py +8 -0
  22. sarpyx/sla/metrics.py +101 -0
  23. sarpyx/{snap → snapflow}/__init__.py +1 -1
  24. sarpyx/snapflow/engine.py +6165 -0
  25. sarpyx/{snap → snapflow}/op.py +0 -1
  26. sarpyx/utils/__init__.py +1 -1
  27. sarpyx/utils/geos.py +652 -0
  28. sarpyx/utils/grid.py +285 -0
  29. sarpyx/utils/io.py +77 -9
  30. sarpyx/utils/meta.py +55 -0
  31. sarpyx/utils/nisar_utils.py +652 -0
  32. sarpyx/utils/rfigen.py +108 -0
  33. sarpyx/utils/wkt_utils.py +109 -0
  34. sarpyx/utils/zarr_utils.py +55 -37
  35. {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/METADATA +9 -5
  36. {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/RECORD +41 -32
  37. {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/WHEEL +1 -1
  38. sarpyx-0.1.6.dist-info/licenses/LICENSE +201 -0
  39. sarpyx-0.1.6.dist-info/top_level.txt +4 -0
  40. tests/test_zarr_compat.py +35 -0
  41. sarpyx/processor/core/decode_v0.py +0 -0
  42. sarpyx/processor/core/decode_v1.py +0 -849
  43. sarpyx/processor/core/focus_old.py +0 -1550
  44. sarpyx/processor/core/focus_v1.py +0 -1566
  45. sarpyx/processor/core/focus_v2.py +0 -1625
  46. sarpyx/snap/engine.py +0 -633
  47. sarpyx-0.1.5.dist-info/top_level.txt +0 -2
  48. {sarpyx-0.1.5.dist-info → sarpyx-0.1.6.dist-info}/entry_points.txt +0 -0
@@ -1,1566 +0,0 @@
1
- import argparse
2
- from typing import Dict, Any, Optional, Union, Tuple, Callable, List
3
- try:
4
- import torch
5
- except ImportError:
6
- print('Unable to import torch module')
7
- torch = None
8
- import pickle
9
- import pandas as pd
10
- import numpy as np
11
- from scipy.interpolate import interp1d
12
- import math
13
- from pathlib import Path
14
- import matplotlib.pyplot as plt
15
- import copy
16
- import gc
17
- from functools import wraps
18
- import psutil
19
- import time
20
- from os import environ
21
-
22
- try:
23
- import zarr
24
- import numcodecs
25
- ZARR_AVAILABLE = True
26
- except ImportError:
27
- print('Warning: zarr not available, falling back to pickle for saving')
28
- ZARR_AVAILABLE = False
29
-
30
- # ---------- Import custom modules ----------
31
- from .code2physical import range_dec_to_sample_rate
32
- from .transforms import perform_fft_custom
33
- from . import constants as cnst
34
- from ..utils.viz import dump
35
-
36
-
37
- # ---------- Global settings ----------
38
- environ['OMP_NUM_THREADS'] = '12' # Set OpenMP threads for parallel processing
39
- __VTIMING__ = False
40
-
41
-
42
-
43
- # ---------- Decorators and utility functions ----------
44
def timing_decorator(func: Callable) -> Callable:
    """Decorator that measures and optionally prints wall-clock run time.

    The elapsed time is printed only when the module-level ``__VTIMING__``
    flag is truthy; otherwise the wrapper is silent.

    Args:
        func: The function to measure.

    Returns:
        The wrapped function with timing measurement.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        elapsed_time = time.time() - start_time
        # Fix: removed the dead `else: pass` branch from the original.
        if __VTIMING__:
            print(f'Elapsed time for {func.__name__}: {elapsed_time:.4f} seconds')
        return result
    return wrapper
65
-
66
def print_memory() -> None:
    """Report current system RAM utilisation as a percentage."""
    usage_pct = psutil.virtual_memory().percent
    print(f'RAM memory usage: {usage_pct}%')
69
-
70
def flush_mem(func: Callable) -> Callable:
    """Decorator for memory-efficient operations with monitoring.

    Wraps *func* so garbage collection runs after every call and, when the
    first positional argument exposes a truthy ``_verbose`` attribute, the
    change in system memory usage is printed.

    Args:
        func: The function to wrap.

    Returns:
        The wrapped function with memory monitoring and cleanup.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Snapshot memory before the call.
        initial_memory = psutil.virtual_memory().percent

        result = func(*args, **kwargs)

        # Reclaim garbage produced by the wrapped call.
        gc.collect()

        final_memory = psutil.virtual_memory().percent

        # Fix: guard against calls with no positional arguments — the
        # original indexed args[0] unconditionally, raising IndexError for
        # plain-function (non-method) usage.
        if args and getattr(args[0], '_verbose', False):
            print(f'Memory usage: {initial_memory:.1f}% -> {final_memory:.1f}% '
                  f'(Δ{final_memory - initial_memory:+.1f}%)')

        return result
    return wrapper
100
-
101
def cleanup_variables(*variables: Any) -> None:
    """Drop local references to *variables* and force garbage collection.

    NOTE(review): ``del`` here removes only this function's own reference
    to each object; the caller's bindings are untouched. The practical
    effect is the forced ``gc.collect()`` pass.

    Args:
        *variables: Variables to delete.
    """
    for obj in variables:
        del obj
    gc.collect()
110
-
111
def initialize_params(
    device: Optional['torch.device'] = None,
    slant_range_vec: Optional[np.ndarray] = None,
    D: Optional[np.ndarray] = None,
    c: Optional[float] = None,
    len_range_line: Optional[int] = None,
    range_sample_freq: Optional[float] = None,
    wavelength: Optional[float] = None
) -> Dict[str, Any]:
    """Bundle the processing parameters into a single dictionary.

    Args:
        device: PyTorch device for computation.
        slant_range_vec: Slant range vector.
        D: Cosine of instantaneous squint angle.
        c: Speed of light.
        len_range_line: Length of range line.
        range_sample_freq: Range sampling frequency.
        wavelength: Radar wavelength.

    Returns:
        Dictionary mapping each parameter name to its value.
    """
    # Fix: the device annotation is now a string so this module still
    # imports when the optional torch dependency is missing (the module
    # binds torch = None on ImportError, and evaluating torch.device
    # eagerly would raise AttributeError at definition time).
    return dict(locals())
135
-
136
def multiply_inplace(
    a: Union[np.ndarray, torch.Tensor],
    b: Union[np.ndarray, torch.Tensor]
) -> Union[np.ndarray, torch.Tensor]:
    """Multiply two arrays element-wise in-place with broadcasting support.

    Args:
        a: First array (modified in-place when it is an array type).
        b: Second array.

    Returns:
        Reference to the modified first operand, or the plain product when
        the operands do not expose a ``shape`` attribute (e.g. scalars).

    Raises:
        ValueError: If arrays have incompatible shapes for broadcasting.
    """
    if hasattr(a, 'shape') and hasattr(b, 'shape'):
        # Check if shapes are compatible for broadcasting
        if a.shape != b.shape and b.size != 1 and a.size != 1:
            # For 2D array * 1D array, the 1D array should match one of the 2D dimensions
            if len(a.shape) == 2 and len(b.shape) == 1:
                if b.shape[0] == a.shape[1]:
                    # Broadcasting along range dimension - NumPy handles this automatically
                    pass
                elif b.shape[0] == a.shape[0]:
                    # Reshape to a column so broadcasting runs along azimuth
                    b = b.reshape(-1, 1)
                else:
                    raise ValueError(f'1D array length ({b.shape[0]}) does not match either dimension of 2D array {a.shape}')

        # Perform in-place multiplication
        try:
            if isinstance(a, np.ndarray):
                np.multiply(a, b, out=a)
            else:  # torch tensor
                a.mul_(b)
            return a
        except (ValueError, RuntimeError) as e:
            raise ValueError(f'Arrays have incompatible shapes for in-place broadcasting: {a.shape} and {b.shape}. '
                             f'Original error: {str(e)}') from e

    # Fix: the original silently returned None for shape-less operands;
    # mirror the sibling multiply() and return the product instead.
    return a * b
176
-
177
def multiply(
    a: Union[np.ndarray, torch.Tensor],
    b: Union[np.ndarray, torch.Tensor],
    debug: bool = False,
) -> Union[np.ndarray, torch.Tensor]:
    """Multiply two arrays element-wise with broadcasting support.

    Args:
        a: First array.
        b: Second array.
        debug: When True, print diagnostic information about broadcasting.

    Returns:
        Element-wise multiplication result.

    Raises:
        ValueError: If arrays have incompatible shapes for broadcasting.
    """
    if hasattr(a, 'shape') and hasattr(b, 'shape'):
        # Check if shapes are compatible for broadcasting
        if a.shape != b.shape and b.size != 1 and a.size != 1:
            if debug:
                print(f'Debug: Attempting to multiply arrays with shapes {a.shape} and {b.shape}')

            # Fix: the reshape/validation below used to be nested inside the
            # `if debug:` block, so 2D x 1D azimuth-axis broadcasting silently
            # failed in normal (debug=False) operation. The logic now always
            # runs; only the prints are gated on debug.
            if len(a.shape) == 2 and len(b.shape) == 1:
                if b.shape[0] == a.shape[1]:
                    if debug:
                        print(f'Debug: Broadcasting 1D array along range dimension (axis=1)')
                elif b.shape[0] == a.shape[0]:
                    if debug:
                        print(f'Debug: Need to reshape 1D array for azimuth dimension (axis=0)')
                    b = b.reshape(-1, 1)  # Reshape for broadcasting along azimuth
                else:
                    raise ValueError(f'1D array length ({b.shape[0]}) does not match either dimension of 2D array {a.shape}')

        # Allow broadcasting for compatible shapes
        try:
            result = a * b
            if debug:
                print(f'Debug: Broadcasting successful, result shape: {result.shape}')
            return result
        except (ValueError, RuntimeError) as e:
            # Fix: this diagnostic used to print unconditionally; gate it.
            if debug:
                print(f'Debug: Broadcasting failed with error: {str(e)}')
            raise ValueError(f'Arrays have incompatible shapes for broadcasting: {a.shape} and {b.shape}. '
                             f'Original error: {str(e)}') from e

    return a * b
224
-
225
@flush_mem
@timing_decorator
def ifft2d(radar_data: Union[np.ndarray, torch.Tensor], backend: str = 'numpy', verbose: bool = False) -> Union[np.ndarray, torch.Tensor]:
    """Apply a memory-efficient 2D inverse FFT: azimuth axis first, then range.

    The range-axis transform is followed by an ifftshift along that axis.

    Args:
        radar_data: Input radar data array.
        backend: Backend to use ('numpy' or 'torch').
        verbose: Whether to print verbose output.

    Returns:
        Processed radar data after 2D inverse FFT.

    Raises:
        ValueError: If backend is not supported.
    """
    if verbose:
        print('Performing 2D inverse FFT...')

    if backend not in ('numpy', 'torch'):
        raise ValueError(f'Unsupported backend: {backend}')

    if backend == 'numpy':
        # Azimuth (axis 0), then range (axis 1) with ifftshift.
        radar_data = np.fft.ifft(radar_data, axis=0)
        radar_data = np.fft.ifftshift(np.fft.ifft(radar_data, axis=1), axes=1)
    else:
        radar_data = torch.fft.ifft(radar_data, dim=0)
        radar_data = torch.fft.ifft(radar_data, dim=1)
        radar_data = torch.fft.ifftshift(radar_data, dim=1)

    if verbose:
        print(f'2D inverse FFT completed, data shape: {radar_data.shape}')
        print_memory()

    return radar_data
266
-
267
@flush_mem
@timing_decorator
def iff_azimuth(
    radar_data: Union[np.ndarray, torch.Tensor],
    backend: str = 'numpy',
    verbose: bool = False
) -> Union[np.ndarray, torch.Tensor]:
    """Apply an inverse FFT along the azimuth (first) axis only.

    Args:
        radar_data: Input radar data array.
        backend: Backend to use ('numpy' or 'torch').
        verbose: Whether to print verbose output.

    Returns:
        Processed radar data after inverse FFT along azimuth dimension.

    Raises:
        ValueError: If backend is not supported.
    """
    if verbose:
        print('Performing inverse FFT along azimuth dimension...')

    if backend == 'torch':
        radar_data = torch.fft.ifft(radar_data, dim=0)
    elif backend == 'numpy':
        radar_data = np.fft.ifft(radar_data, axis=0)
    else:
        raise ValueError(f'Unsupported backend: {backend}')

    if verbose:
        print(f'Inverse FFT along azimuth completed, data shape: {radar_data.shape}')
        print_memory()

    return radar_data
302
-
303
- # -------- Processing Class ----------
304
- class CoarseRDA:
305
- """Memory-efficient Coarse Range Doppler Algorithm processor for SAR data.
306
-
307
- This class implements a memory-optimized coarse Range Doppler Algorithm for processing
308
- synthetic aperture radar (SAR) data, specifically designed for Sentinel-1 data.
309
-
310
- The processing pipeline follows these main steps:
311
- 1. Initialization and data loading
312
- 2. Transmission replica generation
313
- 3. 2D FFT transformation
314
- 4. Range compression
315
- 5. Range Cell Migration Correction (RCMC)
316
- 6. Azimuth compression
317
- 7. Final inverse transformations
318
-
319
- Memory optimizations include:
320
- - In-place operations where possible
321
- - Explicit memory cleanup
322
- - Memory usage monitoring
323
- - Efficient data type usage
324
- """
325
-
326
- # ==================== INITIALIZATION METHODS ====================
327
-
328
    def __init__(
        self,
        raw_data: Dict[str, Any],
        verbose: bool = False,
        backend: str = 'numpy',
        memory_efficient: bool = True
    ) -> None:
        """Initialize the CoarseRDA processor.

        Setup runs in a fixed order: validate inputs, record settings, load
        the echo/ephemeris/metadata, configure the compute backend, and
        pre-compute the transmission replica used later for range
        compression.

        Args:
            raw_data: Dictionary containing 'echo', 'ephemeris', and 'metadata'.
            verbose: Whether to print verbose output.
            backend: Backend to use ('numpy', 'torch', or 'custom').
            memory_efficient: Whether to enable memory-efficient mode.

        Raises:
            ValueError: If invalid backend is specified.
            AssertionError: If required data is missing.
            ImportError: If the torch backend is requested but torch is
                unavailable (raised by _setup_backend).
        """
        self._validate_inputs(raw_data, backend)
        self._initialize_settings(verbose, backend, memory_efficient)
        self._load_data(raw_data)
        self._setup_backend()
        self._initialize_transmission_replica()
352
-
353
- def _validate_inputs(self, raw_data: Dict[str, Any], backend: str) -> None:
354
- """Validate input parameters.
355
-
356
- Args:
357
- raw_data: Dictionary containing radar data.
358
- backend: Processing backend.
359
-
360
- Raises:
361
- AssertionError: If required data is missing.
362
- ValueError: If invalid backend is specified.
363
- """
364
- assert isinstance(raw_data, dict), 'raw_data must be a dictionary'
365
- assert 'echo' in raw_data, 'raw_data must contain "echo" key'
366
- assert 'ephemeris' in raw_data, 'raw_data must contain "ephemeris" key'
367
- assert 'metadata' in raw_data, 'raw_data must contain "metadata" key'
368
-
369
- valid_backends = {'numpy', 'torch', 'custom'}
370
- if backend not in valid_backends:
371
- raise ValueError(f'Backend must be one of {valid_backends}, got {backend}')
372
-
373
- def _initialize_settings(self, verbose: bool, backend: str, memory_efficient: bool) -> None:
374
- """Initialize processor settings.
375
-
376
- Args:
377
- verbose: Whether to print verbose output.
378
- backend: Processing backend.
379
- memory_efficient: Whether to enable memory-efficient mode.
380
- """
381
- self._backend = backend
382
- self._verbose = verbose
383
- self._memory_efficient = memory_efficient
384
-
385
- if self._verbose:
386
- print(f'Memory efficient mode: {"enabled" if memory_efficient else "disabled"}')
387
-
388
- def _load_data(self, raw_data: Dict[str, Any]) -> None:
389
- """Load and preprocess input data with memory optimization.
390
-
391
- Args:
392
- raw_data: Dictionary containing radar data.
393
- """
394
- # Use views instead of copies where possible
395
- self.radar_data = raw_data['echo']
396
-
397
- # Create a copy for ephemeris as we need to modify it
398
- self.ephemeris = raw_data['ephemeris'].copy()
399
- self.ephemeris['time_stamp'] /= 2**24
400
-
401
- # Use reference for metadata as we don't modify it
402
- self.metadata = raw_data['metadata']
403
-
404
- # Initialize dimensions - these should remain constant throughout processing
405
- self.len_az_line, self.len_range_line = self.radar_data.shape
406
-
407
- if self._verbose:
408
- print(f'Loaded radar data with shape: {self.radar_data.shape}')
409
- print(f'Azimuth lines: {self.len_az_line}, Range lines: {self.len_range_line}')
410
- print(f'Data type: {self.radar_data.dtype}')
411
- print(f'Memory usage: {self.radar_data.nbytes / 1024**3:.2f} GB')
412
-
413
- def _setup_backend(self) -> None:
414
- """Set up processing backend and device configuration."""
415
- if self._backend == 'torch':
416
- if torch is None:
417
- raise ImportError('PyTorch is required for torch backend but not available')
418
- self.device = getattr(
419
- self.radar_data,
420
- 'device',
421
- torch.device('cuda' if torch.cuda.is_available() else 'cpu')
422
- )
423
- if self._verbose:
424
- print(f'Selected device: {self.device}')
425
-
426
    def _initialize_transmission_replica(self) -> None:
        """Pre-compute the transmission replica as part of constructor setup.

        Delegates to _generate_tx_replica, which derives the chirp replica
        from the acquisition metadata.
        """
        self._generate_tx_replica()
429
-
430
- # ==================== TRANSMISSION REPLICA METHODS ====================
431
-
432
    @timing_decorator
    def _generate_tx_replica(self) -> None:
        """Generate transmission replica based on metadata parameters.

        Derives the range sampling frequency from the range-decimation code,
        extracts the chirp parameters (start frequency, ramp rate, pulse
        length) and synthesises the replica signal used for range
        compression. Stores `range_sample_freq`, `tx_replica` and
        `replica_len` on the instance via the helper calls.
        """
        if self._verbose:
            print('Generating transmission replica...')

        # Range decimation code -> physical sampling rate.
        # NOTE(review): metadata columns look like pandas Series
        # (.unique() is used) — confirm against the decoder output.
        rgdec = int(self.metadata['range_decimation'].unique()[0])
        if self._verbose:
            print(f'Range decimation code: {rgdec}')

        self.range_sample_freq = range_dec_to_sample_rate(rgdec)
        if self._verbose:
            print(f'Range sample frequency: {self.range_sample_freq:.2f} Hz')

        # Chirp parameters from the downlinked metadata.
        tx_params = self._extract_tx_parameters()

        # Synthesise the chirp replica from those parameters.
        self._create_replica_signal(tx_params)

        if self._verbose:
            print(f'Replica length: {self.replica_len}')
            print('Transmission replica generated successfully!')
460
-
461
- def _extract_tx_parameters(self) -> Dict[str, float]:
462
- """Extract transmission parameters from metadata.
463
-
464
- Returns:
465
- Dictionary containing transmission parameters.
466
- """
467
- txpsf = self.metadata['tx_pulse_start_freq'].unique()[0]
468
- txprr = self.metadata['tx_ramp_rate'].unique()[0]
469
- txpl = self.metadata['tx_pulse_length'].unique()[0]
470
-
471
- if self._verbose:
472
- print(f'TX pulse start frequency: {txpsf:.2f} Hz')
473
- print(f'TX ramp rate: {txprr:.2f} Hz/s')
474
- print(f'TX pulse length: {txpl:.6f} s')
475
-
476
- return {'start_freq': txpsf, 'ramp_rate': txprr, 'pulse_length': txpl}
477
-
478
- def _create_replica_signal(self, tx_params: Dict[str, float]) -> None:
479
- """Create the replica signal from transmission parameters.
480
-
481
- Args:
482
- tx_params: Dictionary containing transmission parameters.
483
- """
484
- txpsf = tx_params['start_freq']
485
- txprr = tx_params['ramp_rate']
486
- txpl = tx_params['pulse_length']
487
-
488
- # Generate replica
489
- self.num_tx_vals = int(txpl * self.range_sample_freq)
490
- if self._verbose:
491
- print(f'Number of TX values: {self.num_tx_vals}')
492
-
493
- tx_replica_time_vals = np.linspace(-txpl/2, txpl/2, num=self.num_tx_vals)
494
- phi1 = txpsf + txprr * txpl / 2
495
- phi2 = txprr / 2
496
-
497
- if self._verbose:
498
- print(f'Phase parameters - phi1: {phi1:.2f}, phi2: {phi2:.2e}')
499
-
500
- self.tx_replica = np.exp(
501
- 2j * np.pi * (phi1 * tx_replica_time_vals + phi2 * tx_replica_time_vals**2)
502
- )
503
- self.replica_len = len(self.tx_replica)
504
-
505
- # ==================== FFT METHODS ====================
506
-
507
- @flush_mem
508
- @timing_decorator
509
- def fft2d(self, w_pad: Optional[int] = None, executors: int = 12) -> None:
510
- """Perform memory-efficient 2D FFT on radar data in range and azimuth dimensions.
511
-
512
- Args:
513
- w_pad: Width padding for range FFT (ignored for dimension preservation).
514
- executors: Number of executors for custom backend.
515
-
516
- Raises:
517
- ValueError: If backend is not supported.
518
- """
519
- if self._verbose:
520
- print(f'FFT input data shape: {self.radar_data.shape}')
521
- print_memory()
522
-
523
- if self._backend == 'numpy':
524
- self._fft2d_numpy_efficient()
525
- elif self._backend == 'custom':
526
- self._fft2d_custom(executors)
527
- elif self._backend == 'torch':
528
- self._fft2d_torch_efficient()
529
- else:
530
- raise ValueError(f'Backend {self._backend} not supported')
531
-
532
- # Verify dimensions are preserved
533
- expected_shape = (self.len_az_line, self.len_range_line)
534
- if self.radar_data.shape != expected_shape:
535
- raise RuntimeError(f'FFT changed radar data shape from {expected_shape} to {self.radar_data.shape}')
536
-
537
- if self._verbose:
538
- print(f'FFT output data shape: {self.radar_data.shape}')
539
- print('- FFT performed successfully!')
540
- print_memory()
541
-
542
    def _fft2d_numpy_efficient(self) -> None:
        """Perform the forward 2D FFT with NumPy, preserving data dimensions.

        Transforms range lines (axis=1) first, then azimuth lines (axis=0)
        followed by an fftshift on the azimuth axis. The input shape is
        asserted unchanged at the end.
        """
        # Remember the shape so we can assert it is preserved.
        original_shape = self.radar_data.shape
        if self._verbose:
            print(f'Original radar data shape: {original_shape}')

        # FFT performance benefits from contiguous memory layout.
        if not self.radar_data.flags.c_contiguous:
            if self._verbose:
                print('Making data contiguous...')
            self.radar_data = np.ascontiguousarray(self.radar_data)

        if self._verbose:
            print(f'Performing FFT along range dimension (axis=1)...')

        # Range FFT; dtype is left to NumPy (complex promotion).
        self.radar_data = np.fft.fft(self.radar_data, axis=1)

        if self._verbose:
            print(f'First FFT along range dimension completed, shape: {self.radar_data.shape}')
            print_memory()

        if self._verbose:
            print(f'Performing FFT along azimuth dimension (axis=0) with fftshift...')

        # Azimuth FFT with zero-frequency component shifted to the centre.
        self.radar_data = np.fft.fftshift(np.fft.fft(self.radar_data, axis=0), axes=0)

        if self._verbose:
            print(f'Second FFT along azimuth dimension completed, shape: {self.radar_data.shape}')
            print_memory()

        # Sanity check: the transform must not alter the footprint.
        assert self.radar_data.shape == original_shape, \
            f'FFT changed shape from {original_shape} to {self.radar_data.shape}'
583
-
584
    def _fft2d_torch_efficient(self) -> None:
        """Perform the forward 2D FFT with PyTorch, preserving dimensions.

        In memory-efficient mode the FFT result is copied back into the
        existing tensor buffer (copy_) instead of rebinding, and the CUDA
        allocator cache is flushed after each stage.

        NOTE(review): copy_ requires self.radar_data to already be a complex
        dtype matching the FFT output — confirm upstream decode guarantees
        this.
        """
        original_shape = self.radar_data.shape

        if self._verbose:
            print('Performing memory-efficient PyTorch FFT...')
            print_memory()

        # Range FFT (dim=1); in-place buffer reuse when memory_efficient.
        if self._memory_efficient:
            temp = torch.fft.fft(self.radar_data, dim=1)
            self.radar_data.copy_(temp)
            del temp
            torch.cuda.empty_cache() if torch.cuda.is_available() else None
        else:
            self.radar_data = torch.fft.fft(self.radar_data, dim=1)

        # Azimuth FFT (dim=0) followed by fftshift on the same axis.
        if self._memory_efficient:
            temp = torch.fft.fft(self.radar_data, dim=0)
            self.radar_data.copy_(temp)
            del temp

            temp = torch.fft.fftshift(self.radar_data, dim=0)
            self.radar_data.copy_(temp)
            del temp
            torch.cuda.empty_cache() if torch.cuda.is_available() else None
        else:
            self.radar_data = torch.fft.fftshift(
                torch.fft.fft(self.radar_data, dim=0),
                dim=0
            )

        # Sanity check: the transform must not alter the footprint.
        assert self.radar_data.shape == original_shape, \
            f'Torch FFT changed shape from {original_shape} to {self.radar_data.shape}'
623
-
624
- @flush_mem
625
- @timing_decorator
626
- def ifft_range(self) -> None:
627
- """Perform memory-efficient inverse FFT along range dimension."""
628
- if self._backend == 'numpy':
629
- # Use EXACT SAME approach as original
630
- self.radar_data = np.fft.ifftshift(np.fft.ifft(self.radar_data, axis=1), axes=1)
631
- elif self._backend == 'torch':
632
- self.radar_data = torch.fft.ifft(self.radar_data, dim=1)
633
- self.radar_data = torch.fft.ifftshift(self.radar_data, dim=1)
634
- else:
635
- raise ValueError(f'Unsupported backend: {self._backend}')
636
-
637
- @flush_mem
638
- @timing_decorator
639
- def ifft_azimuth(self) -> None:
640
- """Perform memory-efficient inverse FFT along azimuth dimension."""
641
- if self._backend == 'numpy':
642
- # Use EXACT SAME approach as original
643
- self.radar_data = np.fft.ifft(self.radar_data, axis=0)
644
- elif self._backend == 'torch':
645
- self.radar_data = torch.fft.ifft(self.radar_data, dim=0)
646
- else:
647
- raise ValueError(f'Unsupported backend: {self._backend}')
648
-
649
- # ==================== FILTER GENERATION METHODS ====================
650
-
651
    @flush_mem
    @timing_decorator
    def get_range_filter(self, pad_w: int = 0) -> np.ndarray:
        """Compute the matched filter for range compression.

        Embeds the TX replica in the centre of a zero array matching the
        current range dimension, normalises by the replica energy, and
        returns the conjugated FFT of the result (matched-filter form).

        Args:
            pad_w: Width padding (ignored - filter always matches radar data dimensions).

        Returns:
            Complex range filter of length radar_data.shape[1].

        Raises:
            AssertionError: If filter dimensions are invalid.
        """
        # Filter length always tracks the current range dimension.
        current_range_dim = self.radar_data.shape[1]

        if self._verbose:
            print(f'Creating range filter for radar data shape: {self.radar_data.shape}')
            print(f'Range dimension: {current_range_dim}')
            print(f'TX replica length: {self.num_tx_vals}')

        # Keep native complex precision; no dtype down-cast.
        range_filter = np.zeros(current_range_dim, dtype=complex)

        # Place the replica in the centre of the filter.
        if current_range_dim >= self.num_tx_vals:
            index_start = (current_range_dim - self.num_tx_vals) // 2
            index_end = index_start + self.num_tx_vals

            if self._verbose:
                print(f'Placing replica at indices [{index_start}:{index_end}] in filter of length {current_range_dim}')

            range_filter[index_start:index_end] = self.tx_replica
        else:
            # Range dimension shorter than replica: centre-truncate replica.
            if self._verbose:
                print(f'⚠️ Range dimension ({current_range_dim}) < replica length ({self.num_tx_vals}), truncating replica')

            replica_start = (self.num_tx_vals - current_range_dim) // 2
            replica_end = replica_start + current_range_dim
            range_filter[:] = self.tx_replica[replica_start:replica_end]

        # Energy normalisation (mirrors the reference C++ implementation).
        norm = np.abs(range_filter)
        energy = np.sum(norm**2) / len(norm)

        if self._verbose:
            print(f'Calculated energy for normalization: {energy:.6f}')

        # Matched filter: FFT then complex conjugate.
        range_filter = np.fft.fft(range_filter)
        range_filter = np.conjugate(range_filter)

        # Normalise by energy, guarding against a degenerate zero replica.
        if energy > 0:
            range_filter = range_filter / energy
        else:
            if self._verbose:
                print('⚠️ Warning: Zero energy detected, skipping normalization')

        if self._verbose:
            print(f'Range filter shape: {range_filter.shape}')
            print(f'Range filter energy after normalization: {np.sum(np.abs(range_filter)**2):.6f}')

        # Filter must match the radar data range dimension exactly.
        assert range_filter.shape[0] == current_range_dim, \
            f'Filter shape mismatch: expected {current_range_dim}, got {range_filter.shape[0]}'

        return range_filter
721
-
722
    @flush_mem
    @timing_decorator
    def get_rcmc(self) -> np.ndarray:
        """Calculate the Range Cell Migration Correction phase filter.

        Computes the cosine of the instantaneous squint angle D from the
        azimuth frequency axis and the mean effective velocity per azimuth
        line, then builds the RCMC phase ramp over the range-frequency axis.

        Returns:
            Complex RCMC filter of shape (len_az_line, radar_data.shape[1]).
        """
        # Populates self.effective_velocities (and, via its helpers,
        # self.az_sample_freq / self.slant_range_vec / self.c —
        # NOTE(review): those attributes are set outside this view; verify).
        self._compute_effective_velocities()

        self.wavelength = cnst.TX_WAVELENGTH_M

        # Azimuth frequency axis spanning [-fs/2, fs/2).
        self.az_freq_vals = np.arange(
            -self.az_sample_freq/2,
            self.az_sample_freq/2,
            self.az_sample_freq/self.len_az_line
        )

        # np.arange with a float step can be off by one sample; force the
        # exact length with linspace when that happens.
        if len(self.az_freq_vals) != self.len_az_line:
            self.az_freq_vals = np.linspace(
                -self.az_sample_freq/2,
                self.az_sample_freq/2,
                self.len_az_line,
                endpoint=False
            )

        if self._verbose:
            print(f'Azimuth frequency values shape: {self.az_freq_vals.shape}')
            print(f'Effective velocities shape: {self.effective_velocities.shape}')

        # Collapse the range axis: one mean velocity per azimuth line.
        mean_effective_velocities = np.mean(self.effective_velocities, axis=1)

        if self._verbose:
            print(f'Mean effective velocities shape: {mean_effective_velocities.shape}')

        # D = cosine of the instantaneous squint angle, per azimuth line.
        self.D = np.sqrt(
            1 - (self.wavelength**2 * self.az_freq_vals**2) /
            (4 * mean_effective_velocities**2)
        )

        if self._verbose:
            print(f'D (cosine squint angle) shape: {self.D.shape}')

        # Build the filter against the CURRENT range dimension.
        current_range_dim = self.radar_data.shape[1]

        range_freq_vals = np.linspace(
            -self.range_sample_freq/2,
            self.range_sample_freq/2,
            num=current_range_dim
        )

        if self._verbose:
            print(f'Range frequency values shape: {range_freq_vals.shape}')
            print(f'Current radar data range dimension: {current_range_dim}')
            print(f'Slant range vec shape: {self.slant_range_vec.shape}')

        # Migration shift referenced to the nearest slant range sample.
        rcmc_shift = self.slant_range_vec[0] * (1/self.D - 1)

        if self._verbose:
            print(f'RCMC shift shape: {rcmc_shift.shape}')

        # Outer-product style broadcasting: (1, range) x (azimuth, 1).
        range_freq_2d = range_freq_vals[np.newaxis, :]
        rcmc_shift_2d = rcmc_shift[:, np.newaxis]

        # Phase ramp implementing the range cell migration correction.
        rcmc_filter = np.exp(4j * np.pi * range_freq_2d * rcmc_shift_2d / self.c)

        if self._verbose:
            print(f'Final RCMC filter shape: {rcmc_filter.shape}')

        return rcmc_filter
802
-
803
    @flush_mem
    @timing_decorator
    def get_azimuth_filter(self) -> np.ndarray:
        """Calculate the azimuth compression phase filter.

        Requires self.D and self.wavelength, which are set by get_rcmc.

        Returns:
            Complex azimuth filter of shape (len(self.D), range_dim).
        """
        if self._verbose:
            print(f'Computing azimuth filter...')
            print(f'Slant range vec shape: {self.slant_range_vec.shape}')
            print(f'D shape: {self.D.shape}')
            print(f'Wavelength: {self.wavelength}')

        current_range_dim = self.radar_data.shape[1]

        # Both branches use the same vector; the check exists only to warn
        # about an unexpected dimension mismatch.
        if current_range_dim != len(self.slant_range_vec):
            if self._verbose:
                print(f'⚠️ Warning: Current range dim ({current_range_dim}) != slant range vec length ({len(self.slant_range_vec)})')
                print(f'This should not happen - using original slant range vector')
            current_slant_range_vec = self.slant_range_vec
        else:
            current_slant_range_vec = self.slant_range_vec

        # Outer-product style broadcasting: (1, range) x (azimuth, 1).
        slant_range_2d = current_slant_range_vec[np.newaxis, :]
        D_2d = self.D[:, np.newaxis]

        # Azimuth matched-filter phase ramp.
        azimuth_filter = np.exp(4j * np.pi * slant_range_2d * D_2d / self.wavelength)

        if self._verbose:
            print(f'Azimuth filter shape: {azimuth_filter.shape}')

        return azimuth_filter
840
-
841
- # ==================== VELOCITY COMPUTATION METHODS ====================
842
-
843
- @timing_decorator
844
- def _compute_effective_velocities(self) -> None:
845
- """Calculate effective spacecraft velocities for processing.
846
-
847
- This method computes the effective velocities needed for RCMC and
848
- azimuth compression by combining spacecraft and ground velocities.
849
- """
850
- # Initialize timing and geometry parameters
851
- self._initialize_timing_parameters()
852
-
853
- # Calculate spacecraft velocities and positions
854
- space_velocities, positions = self._calculate_spacecraft_dynamics()
855
-
856
- # Compute effective velocities using Earth model
857
- self._compute_ground_velocities(space_velocities, positions)
858
-
859
- def _initialize_timing_parameters(self) -> None:
860
- """Initialize timing and geometry parameters for velocity computation.
861
-
862
- Raises:
863
- KeyError: If required metadata columns are missing.
864
- ValueError: If metadata values are invalid.
865
- """
866
- self.c = cnst.SPEED_OF_LIGHT_MPS
867
-
868
- # Check for required columns with case-insensitive matching
869
- metadata_columns = {col.lower(): col for col in self.metadata.columns}
870
-
871
- required_mappings = {
872
- 'pri': ['pri', 'pulse_repetition_interval'],
873
- 'rank': ['rank'],
874
- 'swst': ['swst', 'sampling_window_start_time', 'start_time']
875
- }
876
-
877
- column_map = {}
878
- for param, possible_names in required_mappings.items():
879
- found_column = None
880
- for name in possible_names:
881
- if name.lower() in metadata_columns:
882
- found_column = metadata_columns[name.lower()]
883
- break
884
-
885
- if found_column is None:
886
- available_cols = list(self.metadata.columns)
887
- raise KeyError(
888
- f'Could not find column for {param}. Tried: {possible_names}. '
889
- f'Available columns: {available_cols}'
890
- )
891
- column_map[param] = found_column
892
-
893
- if self._verbose:
894
- print('Column mapping:')
895
- for param, col in column_map.items():
896
- print(f' {param} -> {col}')
897
-
898
- # Extract parameters with error handling
899
- try:
900
- self.pri = self.metadata[column_map['pri']].iloc[0]
901
- rank = self.metadata[column_map['rank']].iloc[0]
902
- range_start_time_base = self.metadata[column_map['swst']].iloc[0]
903
- except (IndexError, TypeError) as e:
904
- raise ValueError(f'Error extracting metadata values: {str(e)}') from e
905
-
906
- # Validate values
907
- assert self.pri > 0, f'PRI must be positive, got {self.pri}'
908
- assert rank >= 0, f'Rank must be non-negative, got {rank}'
909
-
910
- if self._verbose:
911
- print(f'PRI: {self.pri:.6f} s')
912
- print(f'Rank: {rank}')
913
- print(f'Base range start time: {range_start_time_base:.6f} s')
914
-
915
- # Calculate derived parameters
916
- suppressed_data_time = 320 / (8 * cnst.F_REF)
917
- range_start_time = range_start_time_base + suppressed_data_time
918
-
919
- # Sample rates
920
- range_sample_period = 1 / self.range_sample_freq
921
- self.az_sample_freq = 1 / self.pri
922
-
923
- if self._verbose:
924
- print(f'Range start time: {range_start_time:.6f} s')
925
- print(f'Azimuth sample frequency: {self.az_sample_freq:.2f} Hz')
926
-
927
- # Fast time and slant range vectors
928
- sample_num_along_range_line = np.arange(0, self.len_range_line, 1)
929
- fast_time_vec = range_start_time + (range_sample_period * sample_num_along_range_line)
930
- self.slant_range_vec = ((rank * self.pri) + fast_time_vec) * self.c / 2
931
-
932
- if self._verbose:
933
- print(f'Slant range vector shape: {self.slant_range_vec.shape}')
934
- print(f'Slant range min/max: {self.slant_range_vec.min():.2f}/{self.slant_range_vec.max():.2f} m')
935
-
936
- def _calculate_spacecraft_dynamics(self) -> Tuple[np.ndarray, np.ndarray]:
937
- """Calculate spacecraft velocities and positions.
938
-
939
- Returns:
940
- Tuple of (space_velocities, positions) arrays.
941
-
942
- Raises:
943
- KeyError: If required columns are missing from ephemeris or metadata.
944
- ValueError: If interpolation fails.
945
- """
946
- if self._verbose:
947
- print('Calculating spacecraft dynamics...')
948
- print(f'Ephemeris shape: {self.ephemeris.shape}')
949
- print(f'Metadata shape: {self.metadata.shape}')
950
-
951
- # Spacecraft velocity calculations
952
- ecef_vels = self.ephemeris.apply(
953
- lambda x: math.sqrt(x['vx']**2 + x['vy']**2 + x['vz']**2),
954
- axis=1
955
- )
956
-
957
- if self._verbose:
958
- print(f'ECEF velocities shape: {ecef_vels.shape}')
959
- print(f'ECEF velocities range: {ecef_vels.min():.2f} - {ecef_vels.max():.2f} m/s')
960
-
961
- # Extract arrays and ensure they are proper numpy arrays
962
- time_stamps = self.ephemeris['time_stamp'].values
963
- velocity_values = ecef_vels.values
964
- x_values = self.ephemeris['x'].values
965
- y_values = self.ephemeris['y'].values
966
- z_values = self.ephemeris['z'].values
967
-
968
- if self._verbose:
969
- print(f'Time stamps shape: {time_stamps.shape}')
970
- print(f'Time stamps range: {time_stamps.min():.6f} - {time_stamps.max():.6f}')
971
- print(f' veocity values shape: {velocity_values.shape}')
972
- print(f'Position arrays shapes: x={x_values.shape}, y={y_values.shape}, z={z_values.shape}')
973
-
974
- # Ensure arrays are sorted by time for interpolation
975
- sort_indices = np.argsort(time_stamps)
976
- time_stamps = time_stamps[sort_indices]
977
- velocity_values = velocity_values[sort_indices]
978
- x_values = x_values[sort_indices]
979
- y_values = y_values[sort_indices]
980
- z_values = z_values[sort_indices]
981
-
982
- # Calculate metadata time stamps
983
- metadata_times = self.metadata.apply(
984
- lambda x: x['coarse_time'] + x['fine_time'],
985
- axis=1
986
- ).values
987
-
988
- if self._verbose:
989
- print(f'Metadata times shape: {metadata_times.shape}')
990
- print(f'Metadata times range: {metadata_times.min():.6f} - {metadata_times.max():.6f}')
991
- print(f'Ephemeris time range: {time_stamps.min():.6f} - {time_stamps.max():.6f}')
992
-
993
- # Check if metadata times are within ephemeris time range
994
- time_within_range = (metadata_times >= time_stamps.min()) & (metadata_times <= time_stamps.max())
995
- if not np.all(time_within_range):
996
- out_of_range_count = np.sum(~time_within_range)
997
- if self._verbose:
998
- print(f'⚠️ Warning: {out_of_range_count} metadata times are outside ephemeris range')
999
- print(f' Will use boundary values for extrapolation')
1000
-
1001
- # Create interpolation functions with bounds_error=False and fill_value for extrapolation
1002
- try:
1003
- velocity_interp = interp1d(
1004
- time_stamps, velocity_values,
1005
- kind='linear',
1006
- bounds_error=False,
1007
- fill_value=(velocity_values[0], velocity_values[-1])
1008
- )
1009
- x_interp = interp1d(
1010
- time_stamps, x_values,
1011
- kind='linear',
1012
- bounds_error=False,
1013
- fill_value=(x_values[0], x_values[-1])
1014
- )
1015
- y_interp = interp1d(
1016
- time_stamps, y_values,
1017
- kind='linear',
1018
- bounds_error=False,
1019
- fill_value=(y_values[0], y_values[-1])
1020
- )
1021
- z_interp = interp1d(
1022
- time_stamps, z_values,
1023
- kind='linear',
1024
- bounds_error=False,
1025
- fill_value=(z_values[0], z_values[-1])
1026
- )
1027
- except ValueError as e:
1028
- raise ValueError(f'Failed to create interpolation functions: {str(e)}') from e
1029
-
1030
- # Interpolate at metadata time points
1031
- try:
1032
- space_velocities = velocity_interp(metadata_times)
1033
- x_interp_vals = x_interp(metadata_times)
1034
- y_interp_vals = y_interp(metadata_times)
1035
- z_interp_vals = z_interp(metadata_times)
1036
- except Exception as e:
1037
- raise ValueError(f'Interpolation failed: {str(e)}') from e
1038
-
1039
- # Ensure interpolated values are arrays and handle any remaining NaN values
1040
- space_velocities = np.asarray(space_velocities)
1041
- x_interp_vals = np.asarray(x_interp_vals)
1042
- y_interp_vals = np.asarray(y_interp_vals)
1043
- z_interp_vals = np.asarray(z_interp_vals)
1044
-
1045
- # Check for and handle NaN values
1046
- if np.any(np.isnan(space_velocities)):
1047
- nan_count = np.sum(np.isnan(space_velocities))
1048
- if self._verbose:
1049
- print(f'⚠️ Found {nan_count} NaN values in space_velocities, filling with nearest valid values')
1050
-
1051
- # Fill NaN values with nearest valid values
1052
- valid_mask = ~np.isnan(space_velocities)
1053
- if np.any(valid_mask):
1054
- # Forward fill
1055
- space_velocities = pd.Series(space_velocities).fillna(method='ffill').fillna(method='bfill').values
1056
- else:
1057
- # If all NaN, use average ephemeris velocity
1058
- space_velocities.fill(np.nanmean(velocity_values))
1059
-
1060
- # Handle NaN values in position components
1061
- for vals, name in [(x_interp_vals, 'x'), (y_interp_vals, 'y'), (z_interp_vals, 'z')]:
1062
- if np.any(np.isnan(vals)):
1063
- nan_count = np.sum(np.isnan(vals))
1064
- if self._verbose:
1065
- print(f'⚠️ Found {nan_count} NaN values in {name}_interp_vals, filling with nearest valid values')
1066
-
1067
- valid_mask = ~np.isnan(vals)
1068
- if np.any(valid_mask):
1069
- vals_series = pd.Series(vals).fillna(method='ffill').fillna(method='bfill')
1070
- if name == 'x':
1071
- x_interp_vals = vals_series.values
1072
- elif name == 'y':
1073
- y_interp_vals = vals_series.values
1074
- else:
1075
- z_interp_vals = vals_series.values
1076
-
1077
- if self._verbose:
1078
- print(f'Interpolated space_velocities shape: {space_velocities.shape}')
1079
- print(f'Interpolated position component shapes: x={x_interp_vals.shape}, y={y_interp_vals.shape}, z={z_interp_vals.shape}')
1080
-
1081
- # Create position array
1082
- positions = np.column_stack([x_interp_vals, y_interp_vals, z_interp_vals])
1083
-
1084
- if self._verbose:
1085
- print(f'Final space_velocities shape: {space_velocities.shape}')
1086
- print(f'Final positions shape: {positions.shape}')
1087
- print(f'Space velocities range: {space_velocities.min():.2f} - {space_velocities.max():.2f} m/s')
1088
- print(f'Position range - x: {positions[:, 0].min():.0f} to {positions[:, 0].max():.0f}')
1089
- print(f'Position range - y: {positions[:, 1].min():.0f} to {positions[:, 1].max():.0f}')
1090
- print(f'Position range - z: {positions[:, 2].min():.0f} to {positions[:, 2].max():.0f}')
1091
-
1092
- # Validate outputs
1093
- assert isinstance(space_velocities, np.ndarray), 'space_velocities must be numpy array'
1094
- assert isinstance(positions, np.ndarray), 'positions must be numpy array'
1095
- assert len(space_velocities.shape) == 1, f'space_velocities must be 1D, got shape {space_velocities.shape}'
1096
- assert len(positions.shape) == 2, f'positions must be 2D array, got shape {positions.shape}'
1097
- assert positions.shape[1] == 3, f'positions must have 3 columns (x,y,z), got {positions.shape[1]}'
1098
- assert space_velocities.shape[0] == positions.shape[0], f'velocity and position arrays must have same length'
1099
-
1100
- # Final check for NaN values after cleaning
1101
- assert not np.any(np.isnan(space_velocities)), 'NaN values still present in space_velocities after cleaning'
1102
- assert not np.any(np.isnan(positions)), 'NaN values still present in positions after cleaning'
1103
-
1104
- # Check for reasonable values
1105
- assert np.all(space_velocities > 1000), f'Space velocities too low (min: {space_velocities.min():.2f} m/s)'
1106
- assert np.all(space_velocities < 20000), f'Space velocities too high (max: {space_velocities.max():.2f} m/s)'
1107
-
1108
- position_magnitudes = np.linalg.norm(positions, axis=1)
1109
- assert np.all(position_magnitudes > 6e6), f'Position magnitudes too small (min: {position_magnitudes.min():.0f} m)'
1110
- assert np.all(position_magnitudes < 8e6), f'Position magnitudes too large (max: {position_magnitudes.max():.0f} m)'
1111
-
1112
- return space_velocities, positions
1113
-
1114
- def _compute_ground_velocities(self, space_velocities: np.ndarray, positions: np.ndarray) -> None:
1115
- """Compute ground velocities and effective velocities.
1116
-
1117
- Args:
1118
- space_velocities: Spacecraft velocity magnitudes (1D array).
1119
- positions: Spacecraft position vectors (2D array, shape [N, 3]).
1120
-
1121
- Raises:
1122
- AssertionError: If input arrays have incompatible shapes.
1123
- ValueError: If calculations produce invalid results.
1124
- """
1125
- # Enhanced input validation
1126
- assert isinstance(space_velocities, np.ndarray), f'space_velocities must be numpy array, got {type(space_velocities)}'
1127
- assert isinstance(positions, np.ndarray), f'positions must be numpy array, got {type(positions)}'
1128
- assert len(positions.shape) == 2, f'positions must be 2D, got shape {positions.shape}'
1129
- assert positions.shape[1] == 3, f'positions must have 3 columns, got {positions.shape[1]}'
1130
- assert space_velocities.shape[0] == positions.shape[0], f'Array lengths must match: velocities={space_velocities.shape[0]}, positions={positions.shape[0]}'
1131
-
1132
- # Ensure arrays are proper numpy arrays with correct dtypes
1133
- space_velocities = np.asarray(space_velocities, dtype=np.float64)
1134
- positions = np.asarray(positions, dtype=np.float64)
1135
-
1136
- # Check for NaN/inf values
1137
- assert not np.any(np.isnan(space_velocities)), 'NaN values in space_velocities'
1138
- assert not np.any(np.isnan(positions)), 'NaN values in positions'
1139
- assert not np.any(np.isinf(space_velocities)), 'Infinite values in space_velocities'
1140
- assert not np.any(np.isinf(positions)), 'Infinite values in positions'
1141
-
1142
- if self._verbose:
1143
- print('Computing ground velocities...')
1144
- print(f'Space velocities shape: {space_velocities.shape}')
1145
- print(f'Positions shape: {positions.shape}')
1146
- print(f'Slant range vec shape: {self.slant_range_vec.shape}')
1147
- print(f'Input data ranges:')
1148
- print(f' Space velocities: {space_velocities.min():.2f} - {space_velocities.max():.2f} m/s')
1149
- print(f' Positions X: {positions[:, 0].min():.0f} - {positions[:, 0].max():.0f} m')
1150
- print(f' Positions Y: {positions[:, 1].min():.0f} - {positions[:, 1].max():.0f} m')
1151
- print(f' Positions Z: {positions[:, 2].min():.0f} - {positions[:, 2].max():.0f} m')
1152
-
1153
- # Earth model calculations
1154
- a = float(cnst.WGS84_SEMI_MAJOR_AXIS_M)
1155
- b = float(cnst.WGS84_SEMI_MINOR_AXIS_M)
1156
-
1157
- if self._verbose:
1158
- print(f'Earth model parameters: a={a:.0f} m, b={b:.0f} m')
1159
-
1160
- # Calculate spacecraft heights (magnitudes of position vectors)
1161
- H = np.linalg.norm(positions, axis=1) # axis=1 for row-wise norm
1162
- H = np.asarray(H, dtype=np.float64)
1163
-
1164
- # Validate H calculation
1165
- assert H.shape == space_velocities.shape, f'H shape {H.shape} must match velocities shape {space_velocities.shape}'
1166
- assert not np.any(np.isnan(H)), 'NaN values in H (spacecraft heights)'
1167
- assert np.all(H > 0), 'All spacecraft heights must be positive'
1168
-
1169
- W = space_velocities / H
1170
- W = np.asarray(W, dtype=np.float64)
1171
-
1172
- # Calculate latitude using more robust method
1173
- xy_distance = np.sqrt(positions[:, 0]**2 + positions[:, 1]**2)
1174
- xy_distance = np.asarray(xy_distance, dtype=np.float64)
1175
- lat = np.arctan2(positions[:, 2], xy_distance)
1176
- lat = np.asarray(lat, dtype=np.float64)
1177
-
1178
- if self._verbose:
1179
- print(f'H (heights) shape: {H.shape}, range: {H.min():.0f} - {H.max():.0f} m')
1180
- print(f'W (angular velocities) shape: {W.shape}, range: {W.min():.6f} - {W.max():.6f} rad/s')
1181
- print(f'Latitudes range: {np.degrees(lat.min()):.2f} - {np.degrees(lat.max()):.2f} deg')
1182
-
1183
- # Validate intermediate calculations
1184
- assert not np.any(np.isnan(W)), 'NaN values in W (angular velocities)'
1185
- assert not np.any(np.isnan(lat)), 'NaN values in latitude'
1186
-
1187
- # Local Earth radius calculation with explicit numpy array operations
1188
- cos_lat = np.cos(lat)
1189
- sin_lat = np.sin(lat)
1190
- cos_lat = np.asarray(cos_lat, dtype=np.float64)
1191
- sin_lat = np.asarray(sin_lat, dtype=np.float64)
1192
-
1193
- # Ensure all terms are numpy arrays before sqrt operation
1194
- numerator = np.asarray(a**4 * cos_lat**2 + b**4 * sin_lat**2, dtype=np.float64)
1195
- denominator = np.asarray(a**2 * cos_lat**2 + b**2 * sin_lat**2, dtype=np.float64)
1196
- ratio = numerator / denominator
1197
- ratio = np.asarray(ratio, dtype=np.float64)
1198
-
1199
- local_earth_rad = np.sqrt(ratio)
1200
- local_earth_rad = np.asarray(local_earth_rad, dtype=np.float64)
1201
-
1202
- if self._verbose:
1203
- print(f'Local Earth radius range: {local_earth_rad.min():.0f} - {local_earth_rad.max():.0f} m')
1204
-
1205
- # Validate local Earth radius
1206
- assert not np.any(np.isnan(local_earth_rad)), 'NaN values in local_earth_rad'
1207
- assert np.all(local_earth_rad > 0), 'All local Earth radii must be positive'
1208
-
1209
- # Ensure slant_range_vec is also a proper numpy array
1210
- slant_range_vec = np.asarray(self.slant_range_vec, dtype=np.float64)
1211
-
1212
- # Broadcasting for slant range calculation
1213
- slant_range_2d = slant_range_vec[np.newaxis, :] # Shape: [1, range_samples]
1214
- local_earth_rad_2d = local_earth_rad[:, np.newaxis] # Shape: [azimuth_samples, 1]
1215
- H_2d = H[:, np.newaxis] # Shape: [azimuth_samples, 1]
1216
- W_2d = W[:, np.newaxis] # Shape: [azimuth_samples, 1]
1217
-
1218
- # Calculate cosine of look angle with explicit array operations
1219
- term1 = np.asarray(local_earth_rad_2d**2, dtype=np.float64)
1220
- term2 = np.asarray(H_2d**2, dtype=np.float64)
1221
- term3 = np.asarray(slant_range_2d**2, dtype=np.float64)
1222
- term4 = np.asarray(2 * local_earth_rad_2d * H_2d, dtype=np.float64)
1223
-
1224
- cos_beta = (term1 + term2 - term3) / term4
1225
- cos_beta = np.asarray(cos_beta, dtype=np.float64)
1226
-
1227
- # Clip to valid range for cosine
1228
- cos_beta = np.clip(cos_beta, -1.0, 1.0)
1229
-
1230
- if self._verbose:
1231
- print(f'cos_beta shape: {cos_beta.shape}')
1232
- print(f'cos_beta range: {cos_beta.min():.3f} - {cos_beta.max():.3f}')
1233
-
1234
- # Calculate ground velocities
1235
- ground_velocities = local_earth_rad_2d * W_2d * cos_beta
1236
- ground_velocities = np.asarray(ground_velocities, dtype=np.float64)
1237
-
1238
- if self._verbose:
1239
- print(f'Ground velocities shape: {ground_velocities.shape}')
1240
-
1241
- # Calculate effective velocities
1242
- space_velocities_2d = space_velocities[:, np.newaxis] # Shape: [azimuth_samples, 1]
1243
- effective_vel_product = space_velocities_2d * ground_velocities
1244
- effective_vel_product = np.asarray(effective_vel_product, dtype=np.float64)
1245
-
1246
- # Ensure non-negative values before sqrt
1247
- effective_vel_product = np.maximum(effective_vel_product, 0.0)
1248
-
1249
- self.effective_velocities = np.sqrt(effective_vel_product)
1250
- self.effective_velocities = np.asarray(self.effective_velocities, dtype=np.float64)
1251
-
1252
- if self._verbose:
1253
- print(f'Effective velocities shape: {self.effective_velocities.shape}')
1254
- print(f'Effective velocities range: {self.effective_velocities.min():.2f} - {self.effective_velocities.max():.2f} m/s')
1255
-
1256
- # Final validation
1257
- assert not np.any(np.isnan(self.effective_velocities)), 'NaN values in effective velocities'
1258
- assert not np.any(np.isinf(self.effective_velocities)), 'Infinite values in effective velocities'
1259
- assert np.all(self.effective_velocities >= 0), 'All effective velocities must be non-negative'
1260
-
1261
- # ==================== MAIN PROCESSING METHODS ====================
1262
-
1263
- @flush_mem
1264
- @timing_decorator
1265
- def data_focus(self) -> None:
1266
- """Perform memory-efficient complete SAR data focusing using Range Doppler Algorithm.
1267
-
1268
- This method orchestrates the complete SAR focusing process by calling
1269
- three main processing steps in sequence.
1270
-
1271
- Raises:
1272
- RuntimeError: If data dimensions change unexpectedly during processing.
1273
- """
1274
- if self._verbose:
1275
- print('Starting memory-efficient SAR data focusing...')
1276
- print(f'Initial radar data shape: {self.radar_data.shape}')
1277
- print_memory()
1278
-
1279
- # Store initial shape for verification
1280
- initial_shape = self.radar_data.shape
1281
- expected_shape = (self.len_az_line, self.len_range_line)
1282
-
1283
- assert initial_shape == expected_shape, \
1284
- f'Initial data shape {initial_shape} does not match expected {expected_shape}'
1285
-
1286
-
1287
- self.raw_data = copy.deepcopy(self.radar_data)
1288
- if self._verbose:
1289
- print(f'Raw radar data shape: {self.raw_data.shape}')
1290
- print_memory()
1291
- # ------------------------------------------------------------------------
1292
- # Step 1: 2D FFT transformation (preserves dimensions)
1293
- self.fft2d()
1294
- assert self.radar_data.shape == initial_shape, \
1295
- f'FFT changed data shape from {initial_shape} to {self.radar_data.shape}'
1296
- # ------------------------------------------------------------------------
1297
-
1298
- # Step 2: Range compression
1299
- self.range_compression()
1300
- self.range_compressed_data = ifft2d(copy.deepcopy(self.radar_data))
1301
- if self._verbose:
1302
- print(f'Range compressed data shape: {self.radar_data.shape}')
1303
- print_memory()
1304
- # ------------------------------------------------------------------------
1305
-
1306
- # Step 3: Range Cell Migration Correction
1307
- self.rcmc()
1308
- self.rcmc_data = iff_azimuth(copy.deepcopy(self.radar_data))
1309
- if self._verbose:
1310
- print(f'RCMC data shape: {self.radar_data.shape}')
1311
- print_memory()
1312
- # ------------------------------------------------------------------------
1313
- # Step 4: Azimuth compression
1314
- self.azimuth_compression()
1315
- self.azimuth_compressed_data = self.radar_data
1316
- if self._verbose:
1317
- print(f'SAR data focusing completed successfully!')
1318
- print(f'Final radar data shape: {self.radar_data.shape}')
1319
- print_memory()
1320
-
1321
- @flush_mem
1322
- @timing_decorator
1323
- def range_compression(self) -> None:
1324
- """Perform memory-efficient range compression step.
1325
-
1326
- This method applies the range compression filter to compress the radar
1327
- signal in the range dimension while preserving data dimensions.
1328
-
1329
- Raises:
1330
- RuntimeError: If data dimensions change unexpectedly during processing.
1331
- """
1332
- if self._verbose:
1333
- print('Starting range compression...')
1334
- print(f'Input radar data shape: {self.radar_data.shape}')
1335
- print_memory()
1336
-
1337
- # Store initial shape for verification
1338
- initial_shape = self.radar_data.shape
1339
-
1340
- # Legacy compatibility parameters
1341
- w_pad = 0
1342
- original_w = initial_shape[1]
1343
-
1344
- if self._verbose:
1345
- print(f'Processing with original_w={original_w}')
1346
-
1347
- # Perform range compression
1348
- self._perform_range_compression_efficient(w_pad, original_w)
1349
-
1350
- # Verify dimensions are preserved
1351
- assert self.radar_data.shape == initial_shape, \
1352
- f'Range compression changed data shape from {initial_shape} to {self.radar_data.shape}'
1353
-
1354
- if self._verbose:
1355
- print(f'Range compression completed successfully!')
1356
- print(f'Output radar data shape: {self.radar_data.shape}')
1357
- print_memory()
1358
-
1359
- @flush_mem
1360
- @timing_decorator
1361
- def rcmc(self) -> None:
1362
- """Perform memory-efficient Range Cell Migration Correction.
1363
-
1364
- This method applies the RCMC filter to correct for range cell migration
1365
- effects and performs inverse FFT in the range dimension.
1366
-
1367
- Raises:
1368
- RuntimeError: If data dimensions change unexpectedly during processing.
1369
- """
1370
- if self._verbose:
1371
- print('Starting Range Cell Migration Correction...')
1372
- print(f'Input radar data shape: {self.radar_data.shape}')
1373
- print_memory()
1374
-
1375
- # Store initial shape for verification
1376
- initial_shape = self.radar_data.shape
1377
-
1378
- # Perform RCMC
1379
- self._perform_rcmc_efficient()
1380
-
1381
- # Verify dimensions are preserved
1382
- assert self.radar_data.shape == initial_shape, \
1383
- f'RCMC changed data shape from {initial_shape} to {self.radar_data.shape}'
1384
-
1385
- if self._verbose:
1386
- print(f'RCMC completed successfully!')
1387
- print(f'Output radar data shape: {self.radar_data.shape}')
1388
- print_memory()
1389
-
1390
- @flush_mem
1391
- @timing_decorator
1392
- def azimuth_compression(self) -> None:
1393
- """Perform memory-efficient azimuth compression step.
1394
-
1395
- This method applies the azimuth compression filter to focus the radar
1396
- signal in the azimuth dimension and performs inverse FFT in azimuth.
1397
-
1398
- Raises:
1399
- RuntimeError: If data dimensions change unexpectedly during processing.
1400
- """
1401
- if self._verbose:
1402
- print('Starting azimuth compression...')
1403
- print(f'Input radar data shape: {self.radar_data.shape}')
1404
- print_memory()
1405
-
1406
- # Store initial shape for verification
1407
- initial_shape = self.radar_data.shape
1408
-
1409
- # Perform azimuth compression
1410
- self._perform_azimuth_compression_efficient()
1411
-
1412
- # Verify dimensions are preserved
1413
- assert self.radar_data.shape == initial_shape, \
1414
- f'Azimuth compression changed data shape from {initial_shape} to {self.radar_data.shape}'
1415
-
1416
- if self._verbose:
1417
- print(f'Azimuth compression completed successfully!')
1418
- print(f'Output radar data shape: {self.radar_data.shape}')
1419
- print_memory()
1420
-
1421
- def _perform_range_compression_efficient(self, w_pad: int, original_w: int) -> None:
1422
- """Perform memory-efficient range compression step while preserving data dimensions.
1423
-
1424
- Args:
1425
- w_pad: Width padding (ignored - dimensions preserved).
1426
- original_w: Original width (for verification).
1427
-
1428
- Raises:
1429
- ValueError: If array shapes are incompatible.
1430
- AssertionError: If dimensions change unexpectedly.
1431
- """
1432
- if self._verbose:
1433
- print(f'Starting memory-efficient range compression...')
1434
- print(f'Radar data shape: {self.radar_data.shape}')
1435
- print_memory()
1436
-
1437
- # Store original shape for verification
1438
- original_shape = self.radar_data.shape
1439
- expected_shape = (self.len_az_line, self.len_range_line)
1440
-
1441
- # Verify we still have expected dimensions
1442
- assert original_shape == expected_shape, \
1443
- f'Unexpected radar data shape: {original_shape}, expected: {expected_shape}'
1444
-
1445
- # Get range filter with matching dimensions
1446
- range_filter = self.get_range_filter()
1447
-
1448
- if self._verbose:
1449
- print(f'Range filter shape: {range_filter.shape}')
1450
- print(f'Applying range compression filter...')
1451
-
1452
- # Apply range compression filter - USE SAME METHOD AS ORIGINAL
1453
- self.radar_data = multiply(self.radar_data, range_filter)
1454
-
1455
- # Cleanup filter
1456
- cleanup_variables(range_filter)
1457
-
1458
- # Verify dimensions are preserved
1459
- assert self.radar_data.shape == original_shape, \
1460
- f'Range compression changed data shape from {original_shape} to {self.radar_data.shape}'
1461
-
1462
- if self._verbose:
1463
- print(f'Range compression completed. Data shape: {self.radar_data.shape}')
1464
- print_memory()
1465
-
1466
- def _perform_rcmc_efficient(self) -> None:
1467
- """Perform memory-efficient Range Cell Migration Correction."""
1468
- if self._verbose:
1469
- print('Starting memory-efficient RCMC...')
1470
- print_memory()
1471
-
1472
- rcmc_filter = self.get_rcmc()
1473
-
1474
- # Use SAME METHOD AS ORIGINAL
1475
- self.radar_data = multiply(self.radar_data, rcmc_filter)
1476
-
1477
- # Cleanup filter
1478
- cleanup_variables(rcmc_filter)
1479
-
1480
- # Inverse FFT in range
1481
- self.ifft_range()
1482
-
1483
- if self._verbose:
1484
- print('RCMC completed.')
1485
- print_memory()
1486
-
1487
- def _perform_azimuth_compression_efficient(self) -> None:
1488
- """Perform memory-efficient azimuth compression step.
1489
-
1490
- Raises:
1491
- ValueError: If array shapes are incompatible.
1492
- """
1493
- if self._verbose:
1494
- print('Starting memory-efficient azimuth compression...')
1495
- print(f'Radar data shape before azimuth filter: {self.radar_data.shape}')
1496
- print_memory()
1497
-
1498
- # Get azimuth filter
1499
- azimuth_filter = self.get_azimuth_filter()
1500
-
1501
- if self._verbose:
1502
- print(f'Azimuth filter shape: {azimuth_filter.shape}')
1503
-
1504
- # Apply azimuth compression - USE SAME METHOD AS ORIGINAL
1505
- self.radar_data = multiply(self.radar_data, azimuth_filter)
1506
-
1507
- # Cleanup filter
1508
- cleanup_variables(azimuth_filter)
1509
-
1510
- if self._verbose:
1511
- print(f'Radar data shape after azimuth compression: {self.radar_data.shape}')
1512
-
1513
- # Inverse FFT in azimuth
1514
- self.ifft_azimuth()
1515
-
1516
- if self._verbose:
1517
- print(f'Final radar data shape: {self.radar_data.shape}')
1518
- print_memory()
1519
-
1520
- # ==================== UTILITY METHODS ====================
1521
-
1522
-
1523
- @timing_decorator
1524
- def save_file(self, save_path: Union[str, Path]) -> None:
1525
- """Save processed radar data to file.
1526
-
1527
- Args:
1528
- save_path: Path where to save the data.
1529
- """
1530
- dump(self.radar_data, save_path)
1531
- if self._verbose:
1532
- print(f'Data saved to {save_path}')
1533
-
1534
-
1535
- # ==================== EEEZY ====================
1536
- # For backward compatibility - keep original method name as alias
1537
- _prompt_tx_replica = _generate_tx_replica
1538
-
1539
- # ==================== GRAPHIC METHODS ====================
1540
- def _display_slice(self, slice=(0, 4000, 0, 4000),
1541
- vmin=0, vmax=1000,
1542
- figsize=(12,12),
1543
- title = None,
1544
- step: str = 'az') -> None:
1545
- """Display a slice of the radar data for visualization."""
1546
-
1547
- assert step in ['raw', 'range_compressed', 'rcmc', 'az_compressed'], \
1548
- 'Invalid step. Choose from "raw", "range_compressed", "rcmc", or "az_compressed".'
1549
-
1550
- if step == 'raw':
1551
- data = self.raw_data
1552
- elif step == 'range_compressed':
1553
- data = self.range_compressed_data
1554
- elif step == 'rcmc':
1555
- data = self.rcmc_data
1556
- elif step == 'az_compressed':
1557
- data = self.radar_data
1558
- else:
1559
- raise ValueError(f'Invalid step: {step}. Choose from "raw", "range", "rcmc", or "az".')
1560
-
1561
- plt.figure(figsize=figsize)
1562
- plt.imshow(np.abs(data[slice[0]:slice[1], slice[2]:slice[3]]), vmin=vmin, vmax=vmax, cmap='viridis')
1563
- plt.axis('off')
1564
- if title:
1565
- plt.title(title)
1566
- plt.show()