sarkit_convert-0.1.0-py3-none-any.whl

@@ -0,0 +1,17 @@
+ """
+ ========================================
+ SARkit-convert (:mod:`sarkit_convert`)
+ ========================================
+
+ The main namespace is almost empty by design.
+
+ .. list-table::
+
+    * - ``__version__``
+      - SARkit-convert version string
+
+ """
+
+ from sarkit_convert._version import __version__
+
+ __all__ = ["__version__"]
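+
+ # Usage sketch (illustrative only, not part of the released file):
+ #
+ # >>> import sarkit_convert
+ # >>> sarkit_convert.__version__
+ # '0.1.0'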
@@ -0,0 +1,253 @@
+ """
+ =====================================
+ Utility functions for SICD converters
+ =====================================
+
+ Common utility functions for use in SICD converters.
+
+ """
+
+ import itertools
+
+ import numpy as np
+ import numpy.polynomial.polynomial as npp
+ import sarkit.wgs84
+
+ RNIIRS_FIT_PARAMETERS = np.array([3.4761, 0.4357], dtype="float64")
+
+
+ def fit_state_vectors(
+     fit_time_range, times, positions, velocities=None, accelerations=None, order=5
+ ):
+     """Fit position polynomials of degree `order` to state vectors spanning `fit_time_range`."""
+     times = np.asarray(times)
+     positions = np.asarray(positions)
+     knots_per_state = 1
+     if velocities is not None:
+         velocities = np.asarray(velocities)
+         knots_per_state += 1
+     if accelerations is not None:
+         accelerations = np.asarray(accelerations)
+         knots_per_state += 1
+
+     num_coefs = order + 1
+     states_needed = int(np.ceil(num_coefs / knots_per_state))
+     if states_needed > times.size:
+         raise ValueError("Not enough state vectors")
+     # Start with the states bracketing the fit time range, then widen the
+     # window until enough states are included to determine the coefficients.
+     start_state = max(np.sum(times < fit_time_range[0]) - 1, 0)
+     end_state = min(np.sum(times < fit_time_range[1]) + 1, times.size)
+     while end_state - start_state < states_needed:
+         start_state = max(start_state - 1, 0)
+         end_state = min(end_state + 1, times.size)
+
+     rnc = np.arange(num_coefs)
+     used_states = slice(start_state, end_state)
+     used_times = times[used_states][:, np.newaxis]
+     # Stack the polynomial basis and, when available, its first and second
+     # derivatives so positions, velocities, and accelerations are fit jointly.
+     independent_stack = [used_times**rnc]
+     dependent_stack = [positions[used_states, :]]
+     if velocities is not None:
+         independent_stack.append(rnc * used_times ** (rnc - 1).clip(0))
+         dependent_stack.append(velocities[used_states, :])
+     if accelerations is not None:
+         independent_stack.append(rnc * (rnc - 1) * used_times ** (rnc - 2).clip(0))
+         dependent_stack.append(accelerations[used_states, :])
+
+     dependent = np.stack(dependent_stack, axis=-2)
+     independent = np.stack(independent_stack, axis=-2)
+     return np.linalg.lstsq(
+         independent.reshape(-1, independent.shape[-1]),
+         dependent.reshape(-1, dependent.shape[-1]),
+         rcond=-1,
+     )[0]
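+
+ # A minimal usage sketch (illustrative only, not part of the released
+ # package): fit a quadratic to synthetic state vectors along a parabola.
+ #
+ # >>> t = np.linspace(0.0, 10.0, 6)
+ # >>> pos = np.stack([t, t**2, np.zeros_like(t)], axis=-1)
+ # >>> vel = np.stack([np.ones_like(t), 2 * t, np.zeros_like(t)], axis=-1)
+ # >>> coefs = fit_state_vectors((2.0, 8.0), t, pos, velocities=vel, order=2)
+ # >>> coefs.shape  # one column of polynomial coefficients per axis
+ # (3, 3)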
+
+
+ def polyfit2d(x, y, z, order1, order2):
+     """Fits 2D polynomials to data."""
+     if x.ndim != 1 or y.ndim != 1:
+         raise ValueError("Expected x and y to be one dimensional")
+     if not 0 < z.ndim <= 2:
+         raise ValueError("Expected z to be one or two dimensional")
+     if not x.shape[0] == y.shape[0] == z.shape[0]:
+         raise ValueError("Expected x, y, z to have same leading dimension size")
+     # Scale the Vandermonde columns to unit norm to improve conditioning.
+     vander = npp.polyvander2d(x, y, (order1, order2))
+     scales = np.sqrt(np.square(vander).sum(0))
+     coefs_flat = (np.linalg.lstsq(vander / scales, z, rcond=-1)[0].T / scales).T
+     # Keep any trailing dimensions of z so two-dimensional z yields a
+     # stacked coefficient array.
+     return coefs_flat.reshape(order1 + 1, order2 + 1, *z.shape[1:])
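+
+ # A quick check (illustrative only): recover the coefficients of
+ # z = 1 + 2x + 3y + xy from scattered samples.
+ #
+ # >>> rng = np.random.default_rng(0)
+ # >>> x = rng.uniform(-1, 1, 50)
+ # >>> y = rng.uniform(-1, 1, 50)
+ # >>> np.round(polyfit2d(x, y, 1 + 2 * x + 3 * y + x * y, 1, 1), 6)
+ # array([[1., 3.],
+ #        [2., 1.]])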
+
+
+ def polyfit2d_tol(x, y, z, max_order_x, max_order_y, tol, strict_tol=False):
+     """Fits a 2D polynomial of minimum order that brings the maximum residual under `tol`.
+
+     Parameters
+     ----------
+     x : array-like
+         First independent variable values. One dimensional.
+     y : array-like
+         Second independent variable values. One dimensional.
+     z : array-like
+         Dependent variable values. Leading dimension must have the same size as `x` and `y`.
+     max_order_x : int
+         The maximum order in `x` to consider.
+     max_order_y : int
+         The maximum order in `y` to consider.
+     tol : float
+         The maximum residual requested.
+     strict_tol : bool
+         ``True`` if an exception should be raised when `tol` is not met with the allowed orders.
+
+         If ``False``, return the best-fitting polynomial of allowed order.
+
+     Returns
+     -------
+     poly : ndarray
+         2D polynomial coefficients with orders no greater than ``(max_order_x, max_order_y)``.
+
+     Raises
+     ------
+     ValueError
+         If `strict_tol` and the tolerance is not reached.
+
+     """
+     # Consider candidate orders from fewest to most coefficients.
+     orders = sorted(
+         itertools.product(range(max_order_x + 1), range(max_order_y + 1)),
+         key=lambda o: (o[0] + 1) * (o[1] + 1),
+     )
+     best = None
+     for order_x, order_y in orders:
+         poly = polyfit2d(x, y, z, order_x, order_y)
+         resid = np.abs(z - np.moveaxis(npp.polyval2d(x, y, poly), 0, -1)).max()
+         if resid <= tol:
+             return poly
+         if best is None or resid < best[1]:
+             best = (poly, resid)
+     if strict_tol:
+         raise ValueError("Max order exceeded before tolerance was reached")
+     return best[0]
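+
+ # Usage sketch (illustrative only): the order search stops at the smallest
+ # coefficient count that meets the tolerance.
+ #
+ # >>> rng = np.random.default_rng(1)
+ # >>> x = rng.uniform(-1, 1, 100)
+ # >>> y = rng.uniform(-1, 1, 100)
+ # >>> z = 1 + 2 * x + 3 * y + x * y
+ # >>> polyfit2d_tol(x, y, z, 3, 3, 1e-9).shape
+ # (2, 2)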
+
+
+ def broadening_from_amp(amp_vals, threshold_db=None):
+     """Compute the broadening factor from window amplitudes.
+
+     Parameters
+     ----------
+     amp_vals : array-like
+         Window amplitudes.
+     threshold_db : float, optional
+         Threshold used to compute broadening, in dB.
+         (Default: ``10*log10(0.5)``, the half-power point)
+
+     Returns
+     -------
+     float
+
+     """
+     if threshold_db is None:
+         threshold = np.sqrt(0.5)
+     else:
+         threshold = 10 ** (threshold_db / 20)
+     amp_vals = np.asarray(amp_vals)
+     # Heavily zero-padded FFT to finely sample the impulse response.
+     fft_size = 2 ** int(np.ceil(np.log2(amp_vals.size * 10000)))
+     impulse_response = np.abs(np.fft.fft(amp_vals, fft_size))
+     impulse_response /= impulse_response.max()
+     # Mainlobe width in FFT bins: count the bins above threshold on each
+     # side of the zero-frequency peak.
+     width = (impulse_response[: fft_size // 2] < threshold).argmax() + (
+         impulse_response[-1 : fft_size // 2 : -1] > threshold
+     ).argmin()
+
+     return width / fft_size * amp_vals.size
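+
+ # Sanity check (illustrative only): a uniform window gives the familiar
+ # ~0.886 half-power mainlobe broadening.
+ #
+ # >>> round(broadening_from_amp(np.ones(32)), 2)
+ # 0.89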
+
+
+ def _get_sigma0_noise(xml_helper):
+     """Calculate the absolute noise estimate, in sigma0 power units."""
+
+     if xml_helper.element_tree.find("./{*}Radiometric/{*}SigmaZeroSFPoly") is None:
+         raise ValueError(
+             "Radiometric.SigmaZeroSFPoly is not populated, so no sigma0 noise estimate can be derived."
+         )
+     if (
+         xml_helper.load("./{*}Radiometric/{*}NoiseLevel/{*}NoiseLevelType")
+         != "ABSOLUTE"
+     ):
+         raise ValueError(
+             "Radiometric.NoiseLevel.NoiseLevelType is not `ABSOLUTE`, so no noise estimate can be derived."
+         )
+
+     # noise at the SCP, converted from dB to power
+     noisepoly = xml_helper.load("./{*}Radiometric/{*}NoiseLevel/{*}NoisePoly")
+     scp_noise_db = noisepoly[0, 0]
+     scp_noise = 10 ** (scp_noise_db / 10)
+
+     # convert to SigmaZero value
+     sigma_zero_sf = xml_helper.load("./{*}Radiometric/{*}SigmaZeroSFPoly")
+     scp_noise *= sigma_zero_sf[0, 0]
+
+     return scp_noise
+
+
+ def _get_default_signal_estimate(xml_helper):
+     """Gets default signal for use in the RNIIRS calculation.
+
+     This will be 1.0 for copolar (or unknown) collections, and 0.25 for
+     cross-polar collections.
+     """
+
+     pol = xml_helper.load("./{*}ImageFormation/{*}TxRcvPolarizationProc")
+     if pol is None or ":" not in pol:
+         return 1.0
+
+     # e.g. "V:V" is copolar; "V:H" is cross-polar
+     pols = pol.split(":")
+
+     return 1.0 if pols[0] == pols[1] else 0.25
+
+
+ def _estimate_rniirs(information_density):
+     """Calculate an RNIIRS estimate from the information density or Shannon-Hartley channel capacity.
+
+     This mapping has been empirically determined by fitting Shannon-Hartley channel
+     capacity to RNIIRS for some sample images.
+
+     To maintain positivity of the estimated RNIIRS, this transitions to a linear
+     model at low information density.
+
+     """
+     a = RNIIRS_FIT_PARAMETERS
+     # Transition point and slope chosen so the logarithmic and linear models
+     # join with matching value and derivative.
+     iim_transition = np.exp(1 - np.log(2) * a[0] / a[1])
+     slope = a[1] / (iim_transition * np.log(2))
+
+     if not isinstance(information_density, np.ndarray):
+         information_density = np.array(information_density, dtype="float64")
+     orig_ndim = information_density.ndim
+     if orig_ndim == 0:
+         information_density = np.reshape(information_density, (1,))
+
+     out = np.empty(information_density.shape, dtype="float64")
+     mask = information_density > iim_transition
+     mask_other = ~mask
+     if np.any(mask):
+         out[mask] = a[0] + a[1] * np.log2(information_density[mask])
+     if np.any(mask_other):
+         out[mask_other] = slope * information_density[mask_other]
+
+     if orig_ndim == 0:
+         return float(out[0])
+     return out
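+
+ # Spot check (illustrative only): with the fit parameters above, an
+ # information density of 100 maps to an RNIIRS estimate of about 6.37.
+ #
+ # >>> round(_estimate_rniirs(100.0), 2)
+ # 6.37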
+
+
+ def get_rniirs_estimate(xml_helper):
+     """Calculate the information density and RNIIRS estimate for a SICD according to the RGIQE."""
+     scp_noise = _get_sigma0_noise(xml_helper)
+     signal = _get_default_signal_estimate(xml_helper)
+
+     # unit normal to the image plane
+     u_row = xml_helper.load("./{*}Grid/{*}Row/{*}UVectECF")
+     u_col = xml_helper.load("./{*}Grid/{*}Col/{*}UVectECF")
+     ipn = np.cross(u_row, u_col)
+     u_ipn = ipn / np.linalg.norm(ipn)
+
+     # unit normal to the ground plane at the SCP
+     scp_llh = xml_helper.load("./{*}GeoData/{*}SCP/{*}LLH")
+     u_gpn = sarkit.wgs84.up(scp_llh)
+
+     # bandwidth area, projected from the image plane to the ground plane
+     bw_sf = np.dot(u_gpn, u_ipn)
+     bw_area = abs(
+         xml_helper.load("./{*}Grid/{*}Row/{*}ImpRespBW")
+         * xml_helper.load("./{*}Grid/{*}Col/{*}ImpRespBW")
+         * bw_sf
+     )
+
+     # Shannon-Hartley channel capacity per unit area
+     inf_density = float(bw_area * np.log2(1 + signal / scp_noise))
+     rniirs = float(_estimate_rniirs(inf_density))
+
+     return inf_density, rniirs
@@ -0,0 +1 @@
+ __version__ = '0.1.0'