ticoi 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ticoi might be problematic. Click here for more details.
- ticoi/__about__.py +1 -0
- ticoi/__init__.py +0 -0
- ticoi/core.py +1500 -0
- ticoi/cube_data_classxr.py +2204 -0
- ticoi/cube_writer.py +741 -0
- ticoi/example.py +81 -0
- ticoi/filtering_functions.py +676 -0
- ticoi/interpolation_functions.py +236 -0
- ticoi/inversion_functions.py +1015 -0
- ticoi/mjd2date.py +31 -0
- ticoi/optimize_coefficient_functions.py +264 -0
- ticoi/pixel_class.py +1830 -0
- ticoi/seasonality_functions.py +209 -0
- ticoi/utils.py +725 -0
- ticoi-0.0.1.dist-info/METADATA +152 -0
- ticoi-0.0.1.dist-info/RECORD +18 -0
- ticoi-0.0.1.dist-info/WHEEL +4 -0
- ticoi-0.0.1.dist-info/licenses/LICENSE +165 -0
|
@@ -0,0 +1,676 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Author : Laurane Charrier, Lei Guo, Nathan Lioret
|
|
3
|
+
Reference:
|
|
4
|
+
Charrier, L., Yan, Y., Koeniguer, E. C., Leinss, S., & Trouvé, E. (2021). Extraction of velocity time series with an optimal temporal sampling from displacement
|
|
5
|
+
observation networks. IEEE Transactions on Geoscience and Remote Sensing.
|
|
6
|
+
Charrier, L., Yan, Y., Colin Koeniguer, E., Mouginot, J., Millan, R., & Trouvé, E. (2022). Fusion of multi-temporal and multi-sensor ice velocity observations.
|
|
7
|
+
ISPRS annals of the photogrammetry, remote sensing and spatial information sciences, 3, 311-318.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import dask.array as da
|
|
11
|
+
import numpy as np
|
|
12
|
+
import xarray as xr
|
|
13
|
+
from scipy.ndimage import gaussian_filter1d, median_filter
|
|
14
|
+
from scipy.signal import savgol_filter
|
|
15
|
+
from sklearn.decomposition import FastICA
|
|
16
|
+
from statsmodels.nonparametric.smoothers_lowess import lowess
|
|
17
|
+
|
|
18
|
+
# %% ======================================================================== #
|
|
19
|
+
# TEMPORAL SMOOTHING #
|
|
20
|
+
# =========================================================================%% #
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def numpy_ewma_vectorized(series: np.ndarray, halflife: int = 30) -> np.ndarray:
    """
    Return the exponentially weighted moving average (EWMA) of a 1-D series.

    Implements the recursion y[0] = series[0], y[t] = (1 - alpha) * y[t-1] + alpha * series[t]
    without a Python loop, where alpha is derived from the halflife.

    :param series: Input series for which the EWMA needs to be calculated
    :param halflife: Halflife parameter for the EWMA calculation (default is 30)

    :return: The exponentially weighted moving average of the input series
    """

    alpha = 1 - np.exp(-np.log(2) / halflife)
    decay = 1 - alpha
    n = series.shape[0]

    # Powers of the decay factor, decay**0 .. decay**n
    powers = decay ** np.arange(n + 1)
    inv_powers = 1 / powers[:-1]

    # Contribution of the initial value, decayed over time
    offset = series[0] * powers[1:]

    # Rescale each sample so a single cumulative sum yields the weighted sums
    first_weight = alpha * decay ** (n - 1)
    weighted = (series * first_weight) * inv_powers
    running = weighted.cumsum()

    return offset + running * inv_powers[::-1]
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def ewma_smooth(
    series: np.ndarray,
    t_obs: np.ndarray,
    t_interp: np.ndarray,
    t_out: np.ndarray,
    t_win: int = 90,
    sigma: int = 3,
    order: int | None = 3,
) -> np.ndarray:
    """
    Calculate an exponentially weighted moving average (EWMA) of a series at specific time points.

    :param series: Input series to be smoothed
    :param t_obs: Time points of the observed series
    :param t_interp: Time points to interpolate the smoothed series at
    :param t_out: Integer indices (into the interpolated grid) at which to return the result
    :param t_win: Halflife of the exponential kernel, in samples (default is 90)
    :param sigma: Unused here; kept for a uniform smoothing-function interface
    :param order: Unused here; kept for a uniform smoothing-function interface

    :return: The smoothed series at the specified output time points
    """

    # Bug fix: t_win was previously hard-coded to 10, silently ignoring the
    # parameter (and the documented default of 90).
    valid = ~np.isnan(series)
    t_obs = t_obs[valid]
    series = series[valid]
    try:
        series_smooth = numpy_ewma_vectorized(series, halflife=t_win)
        series_interp = np.interp(t_interp, t_obs, series_smooth)
    except Exception:  # e.g. all observations are NaN -> empty arrays raise
        return np.zeros(len(t_out))
    return series_interp[t_out]
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def gaussian_smooth(
|
|
78
|
+
series: np.ndarray,
|
|
79
|
+
t_obs: np.ndarray,
|
|
80
|
+
t_interp: np.ndarray,
|
|
81
|
+
t_out: np.ndarray,
|
|
82
|
+
t_win: int = 90,
|
|
83
|
+
sigma: int = 3,
|
|
84
|
+
order: int | None = 3,
|
|
85
|
+
) -> np.ndarray:
|
|
86
|
+
"""
|
|
87
|
+
Perform Gaussian smoothing on a time series data.
|
|
88
|
+
|
|
89
|
+
:param series: Input time series data
|
|
90
|
+
:param t_obs: Time observations corresponding to the input data
|
|
91
|
+
:param t_interp: Time points for interpolation
|
|
92
|
+
:param t_out: Time points for the output
|
|
93
|
+
:param t_win: Smoothing window size (default is 90)
|
|
94
|
+
:param sigma: Standard deviation for Gaussian kernel (default is 3)
|
|
95
|
+
:param order: Order of the smoothing function (default is 3)
|
|
96
|
+
|
|
97
|
+
:return:The smoothed time series data at the specified output time points
|
|
98
|
+
"""
|
|
99
|
+
|
|
100
|
+
t_obs = t_obs[~np.isnan(series)]
|
|
101
|
+
series = series[~np.isnan(series)]
|
|
102
|
+
try:
|
|
103
|
+
# noinspection PyTypeChecker
|
|
104
|
+
# series = median_filter(series, size=5, mode='reflect', axes=0)
|
|
105
|
+
series_interp = np.interp(t_interp, t_obs, series)
|
|
106
|
+
series_smooth = gaussian_filter1d(series_interp, sigma, mode="reflect", truncate=4.0, radius=t_win)
|
|
107
|
+
return series_smooth[t_out]
|
|
108
|
+
except:
|
|
109
|
+
return np.zeros(len(t_out))
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def lowess_smooth(
    series: np.ndarray,
    t_obs: np.ndarray,
    t_interp: np.ndarray,
    t_out: np.ndarray,
    t_win: int = 90,
    sigma: int = 3,
    order: int | None = 3,
) -> np.ndarray:
    """
    Smooth a time series with LOWESS (locally weighted scatterplot smoothing).

    :param series: Input series to be smoothed
    :param t_obs: Time points of the observed series
    :param t_interp: Time points to interpolate the smoothed series at
    :param t_out: Integer indices (into the interpolated grid) at which to return the result
    :param t_win: Smoothing window size in samples, converted to the LOWESS fraction (default is 90)
    :param sigma: Unused here; kept for a uniform smoothing-function interface
    :param order: Unused here; kept for a uniform smoothing-function interface

    :return: The smoothed series at the specified output time points
    """
    try:
        # Fraction of the data used for each local regression.
        # Bug fix: t_win was previously hard-coded to 60, silently ignoring the parameter.
        frac = t_win / len(t_interp)

        not_nan = ~np.isnan(series)
        series, t_obs = series[not_nan], t_obs[not_nan]
        series_smooth = lowess(series, t_obs, frac=frac, return_sorted=False)
        series_interp = np.interp(t_interp, t_obs, series_smooth)
        return series_interp[t_out]
    except Exception:  # e.g. all observations are NaN
        return np.zeros(len(t_out))
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def median_smooth(
|
|
135
|
+
series: np.ndarray,
|
|
136
|
+
t_obs: np.ndarray,
|
|
137
|
+
t_interp: np.ndarray,
|
|
138
|
+
t_out: np.ndarray,
|
|
139
|
+
t_win: int = 90,
|
|
140
|
+
sigma: int = 3,
|
|
141
|
+
order: int | None = 3,
|
|
142
|
+
) -> np.ndarray:
|
|
143
|
+
"""
|
|
144
|
+
Calculate a smoothed series using median filtering.
|
|
145
|
+
|
|
146
|
+
:param series: The input series to be smoothed
|
|
147
|
+
:param t_obs: The time observations corresponding to the input series
|
|
148
|
+
:param t_interp: The time values for interpolation
|
|
149
|
+
:param t_out: The time values for the output series
|
|
150
|
+
:param t_win: Smoothing window size (default is 90)
|
|
151
|
+
|
|
152
|
+
:return:The smoothed series corresponding to the output time values t_out
|
|
153
|
+
"""
|
|
154
|
+
|
|
155
|
+
t_obs = t_obs[~np.isnan(series)]
|
|
156
|
+
series = series[~np.isnan(series)]
|
|
157
|
+
try:
|
|
158
|
+
series_interp = np.interp(t_interp, t_obs, series)
|
|
159
|
+
series_smooth = median_filter(series_interp, size=t_win, mode="reflect", axes=0)
|
|
160
|
+
except:
|
|
161
|
+
return np.zeros(len(t_out))
|
|
162
|
+
|
|
163
|
+
return series_smooth[t_out]
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def savgol_smooth(
|
|
167
|
+
series: np.ndarray,
|
|
168
|
+
t_obs: np.ndarray,
|
|
169
|
+
t_interp: np.ndarray,
|
|
170
|
+
t_out: np.ndarray,
|
|
171
|
+
t_win: int = 90,
|
|
172
|
+
sigma: int = 3,
|
|
173
|
+
order: int | None = 3,
|
|
174
|
+
) -> np.ndarray:
|
|
175
|
+
"""
|
|
176
|
+
Perform Savitzky-Golay smoothing on a time series.
|
|
177
|
+
|
|
178
|
+
:param series: Input time series to be smoothed
|
|
179
|
+
:param t_obs: Observed time points corresponding to the input series
|
|
180
|
+
:param t_interp: Time points for interpolation
|
|
181
|
+
:param t_out: Time points to extract the smoothed values for
|
|
182
|
+
:param t_win: Smoothing window size (default is 90)
|
|
183
|
+
:param order: Order of the polynomial used in the smoothing (default is 3)
|
|
184
|
+
|
|
185
|
+
:return: The smoothed time series at the specified output time points
|
|
186
|
+
"""
|
|
187
|
+
t_obs = t_obs[~np.isnan(series)]
|
|
188
|
+
series = series[~np.isnan(series)]
|
|
189
|
+
|
|
190
|
+
try:
|
|
191
|
+
series_interp = np.interp(t_interp, t_obs, series)
|
|
192
|
+
series_smooth = savgol_filter(series_interp, window_length=t_win, polyorder=order, axis=-1)
|
|
193
|
+
except:
|
|
194
|
+
return np.zeros(len(t_out))
|
|
195
|
+
|
|
196
|
+
return series_smooth[t_out]
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def ica_denoise(
    series: np.ndarray,
    t_obs: np.ndarray,
    t_interp: np.ndarray,
    t_out: np.ndarray,
    t_win: int = 90,
    sigma: int = 3,
    order: int | None = 3,
) -> np.ndarray:
    """
    Perform ICA denoising on a time series data.

    :param series: Input time series data
    :param t_obs: Time observations corresponding to the input data
    :param t_interp: Time points for interpolation (regular grid)
    :param t_out: Integer indices (into the interpolated grid) at which to return the result
    :param t_win: Unused here; kept for a uniform smoothing-function interface
    :param sigma: Unused here; kept for a uniform smoothing-function interface
    :param order: Unused here; kept for a uniform smoothing-function interface

    :return: The denoised time series data at the specified output time points
    """

    # Remove NaN values so FastICA and np.interp only see valid samples
    valid_indices = ~np.isnan(series)
    t_obs = t_obs[valid_indices]
    series = series[valid_indices]

    try:
        # Apply ICA on the (value, time) pairs.
        # NOTE(review): fit_transform followed immediately by inverse_transform
        # reconstructs an approximation of the input; any denoising effect relies on
        # the whitening/2-component representation — confirm this is intended.
        ica = FastICA(n_components=2)
        data_ica = ica.fit_transform(np.column_stack((series, t_obs)))

        # Inverse transform to get denoised series
        data_denoised = ica.inverse_transform(data_ica)

        # Column 0 holds the denoised values; resample them onto the regular grid
        denoised_output = np.interp(t_interp, t_obs, data_denoised[:, 0])

        # t_out indexes into the interpolated (e.g. daily) series
        return denoised_output[t_out]
    except Exception:
        # FastICA may fail to converge or the arrays may be empty (all NaN)
        return np.zeros(len(t_out))
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def dask_smooth(
    dask_array: np.ndarray,
    t_obs: np.ndarray,
    t_interp: np.ndarray,
    t_out: np.ndarray,
    filt_func: "Callable[..., np.ndarray]" = gaussian_smooth,
    t_win: int = 90,
    sigma: int = 3,
    order: int = 3,
    axis: int = 2,
) -> da.array:
    """
    Apply smoothing to the input array along the specified axis using the given kernel.

    Designed to be mapped over dask blocks: it receives a plain numpy block, runs the
    1-D smoothing function pixel-by-pixel with np.apply_along_axis, and wraps the
    result back into a dask array.

    :param dask_array: Input numpy block to be smoothed.
    :param t_obs: Array of observation times corresponding to the input block.
    :param t_interp: Array of times at which to interpolate the data.
    :param t_out: Array of times (indices) at which to output the smoothed data.
    :param filt_func: Smoothing function to apply (e.g. gaussian_smooth, ewma_smooth,
        median_smooth, savgol_smooth); annotation fixed — the original declared ``str``
        while the default is a function.
    :param t_win: Smoothing window size (default is 90)
    :param sigma: Standard deviation for Gaussian smoothing (default is 3)
    :param order: Order of the smoothing function (default is 3)
    :param axis: Axis along which to apply the smoothing.

    :return: A Dask array containing the smoothed data.
    """

    # TODO : using scipy.interpolate instead of np.interp to do it for one chunk?
    # But it could be slow and memory intensive

    return da.from_array(
        np.apply_along_axis(
            filt_func,
            axis,
            dask_array,
            t_obs=t_obs,
            t_interp=t_interp,
            t_out=t_out,
            t_win=t_win,
            sigma=sigma,
            order=order,
        )
    )
|
|
293
|
+
|
|
294
|
+
|
|
295
|
+
def dask_smooth_wrapper(
    dask_array: da.array,
    dates: xr.DataArray,
    t_out: np.ndarray,
    smooth_method: str = "gaussian",
    t_win: int = 90,
    sigma: int = 3,
    order: int = 3,
    axis: int = 2,
):
    """
    Wrap a Dask array so the chosen smoothing kernel is applied block-wise.

    :param dask_array: Dask array to be smoothed
    :param dates: Array of the central dates of the data
    :param t_out: Output timestamps for the smoothed array (datetimes or day indices)
    :param smooth_method: Smoothing method ("gaussian", "ewma", "median", "savgol", "ICA", "lowess")
    :param t_win: Smoothing window size (default is 90)
    :param sigma: Standard deviation for Gaussian smoothing (default is 3)
    :param order: Order of the smoothing function (default is 3)
    :param axis: Axis along which smoothing is applied (default is 2)

    :return: Smoothed dask array with specified parameters.
    """

    # Conversion of the mid_date of the observations into numerical values:
    # the difference between each mid_date and the minimal date, in days
    t_obs = (dates.data - dates.data.min()).astype("timedelta64[D]").astype("float64")

    # Convert datetime outputs to integer day offsets from the same origin.
    # NOTE(review): other datetime dtypes (e.g. datetime64[us]) would pass through
    # unconverted — confirm callers only supply ns/s datetimes or day indices.
    if t_out.dtype == "datetime64[ns]" or t_out.dtype == "<M8[s]":  # Convert ns to days
        t_out = (t_out - dates.data.min()).astype("timedelta64[D]").astype("int")
    if t_out.min() < 0:
        # Shift both axes so every output index is non-negative (valid array index)
        t_obs = t_obs - t_out.min()  # Ensure the output time points are within the range of interpolated points
        t_out = t_out - t_out.min()

    # Some mid_dates can be exactly equal, which would break the interpolation later;
    # jitter them with small random offsets until all time points are unique
    while np.unique(t_obs).size < t_obs.size:
        t_obs += np.random.uniform(
            low=0.01, high=0.09, size=t_obs.shape
        )  # Add a small value to make it unique, in case of non-monotonic time point
    # Re-sort since the jitter (or the input dates) may have broken monotonic order
    t_obs.sort()

    # Time stamps for the interpolated velocity, here every day
    t_interp = np.arange(
        0, int(max(t_obs.max(), t_out.max()) + 1), 1
    )

    # Dispatch table: name of the smoothing method -> 1-D kernel function
    filt_func = {
        "gaussian": gaussian_smooth,
        "ewma": ewma_smooth,
        "median": median_smooth,
        "savgol": savgol_smooth,
        "ICA": ica_denoise,
        "lowess": lowess_smooth,
    }

    # Apply the kernel block-wise; each block is smoothed independently along `axis`
    da_smooth = dask_array.map_blocks(
        dask_smooth,
        filt_func=filt_func[smooth_method],
        t_obs=t_obs,
        t_interp=t_interp,
        t_out=t_out,
        t_win=t_win,
        sigma=sigma,
        order=order,
        axis=axis,
        dtype=dask_array.dtype,
    )

    return da_smooth
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
def z_score_filt(obs: np.ndarray, z_thres: int = 2, axis: int = 2):
    """
    Flag as inliers the observations lying within z_thres standard deviations of the
    per-pixel mean (NaN-aware).

    Annotation fixed: this function receives plain numpy blocks when mapped over a
    dask array with map_blocks (``da.array`` is a function, not a type).

    :param obs: cube data to filter
    :param z_thres: keep an observation only if its absolute zscore is strictly below
        this threshold (default is 2)
    :param axis: axis along which the statistics are computed (default is 2)
    :return: boolean mask, True for inliers
    """

    mean = np.nanmean(obs, axis=axis, keepdims=True)
    std_dev = np.nanstd(obs, axis=axis, keepdims=True)

    # NaN observations yield NaN z-scores and are therefore flagged as outliers
    z_scores = (obs - mean) / std_dev
    inlier_flag = np.abs(z_scores) < z_thres

    return inlier_flag
|
|
384
|
+
|
|
385
|
+
|
|
386
|
+
def mz_score_filt(obs: np.ndarray, mz_thres: float = 3.5, axis: int = 2):
    """
    Flag as inliers the observations within mz_thres modified z-scores of the
    per-pixel median (NaN-aware, MAD-based, robust to outliers).

    Annotations fixed: the function receives plain numpy blocks via map_blocks, and
    the default threshold 3.5 is a float, not an int.

    :param obs: cube data to filter
    :param mz_thres: keep an observation only if its absolute modified zscore is
        strictly below this threshold (default is 3.5)
    :param axis: axis along which the statistics are computed (default is 2)
    :return: boolean mask, True for inliers
    """

    med = np.nanmedian(obs, axis=axis, keepdims=True)
    # Median absolute deviation (MAD)
    mad = np.nanmedian(np.abs(obs - med), axis=axis, keepdims=True)

    # 0.6745 scales the MAD to be consistent with the standard deviation of a
    # normal distribution (Iglewicz & Hoaglin modified z-score)
    mz_scores = 0.6745 * (obs - med) / mad
    inlier_flag = np.abs(mz_scores) < mz_thres

    return inlier_flag
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
def NVVC_angle_filt(
    obs_cpx: np.array, vvc_thres: float = 0.1, angle_thres: int = 45, z_thres: int = 2, axis: int = 2
) -> np.array:
    """
    Combine a median-angle filter and a z-score filter, selected per pixel by the
    Velocity Vector Coherence (VVC).

    Where VVC > vvc_thres (coherent flow), observations are kept if their direction is
    within angle_thres of the per-pixel median vector; elsewhere (incoherent flow, the
    direction is unreliable) a z-score filter on the velocity magnitude is used instead.

    :param obs_cpx: cube data to filter (complex: real part is vx, imaginary part is vy)
    :param vvc_thres: VVC threshold selecting between the angle filter and the zscore filter
    :param angle_thres: remove the observation if it is angle_thres (degrees) away from the median vector
    :param z_thres: remove the observation if its absolute zscore is above this threshold (default is 2)
    :param axis: axis on which to perform the computation (time axis)
    :return: boolean mask, True for inliers
    """

    vx, vy = np.real(obs_cpx), np.imag(obs_cpx)
    # Per-pixel median components (variables named *_mean in the original code)
    vx_mean = np.nanmedian(vx, axis=axis, keepdims=True)
    vy_mean = np.nanmedian(vy, axis=axis, keepdims=True)
    mean_magnitude = np.hypot(vx_mean, vy_mean)  # norm of the median vector

    velo_magnitude = np.hypot(vx, vy)  # norm of each observation
    # Sum of unit direction vectors over the time axis
    x_component = np.nansum(vx / velo_magnitude, axis=axis)
    y_component = np.nansum(vy / velo_magnitude, axis=axis)

    nz = velo_magnitude.shape[axis]
    VVC = (
        np.hypot(x_component, y_component) / nz
    )  # velocity coherence as defined in Charrier, L., Yan, Y., Colin Koeniguer, E., Mouginot, J., Millan, R., & Trouvé, E. (2022). Fusion of multi-temporal and multi-sensor ice velocity observations.
    # ISPRS annals of the photogrammetry, remote sensing and spatial information sciences, 3, 311-318.
    VVC = np.expand_dims(VVC, axis=axis)

    vvc_cond = VVC > vvc_thres

    # cos(angle between observation and median vector) > cos(threshold) keeps the observation
    dot_product = vx_mean * vx + vy_mean * vy

    angle_filter = dot_product / (mean_magnitude * velo_magnitude) > np.cos(angle_thres * np.pi / 180)

    # Coherent pixels use the angle criterion; incoherent pixels fall back to the zscore
    inlier_flag = np.where(vvc_cond, angle_filter, z_score_filt(velo_magnitude, z_thres=z_thres, axis=axis))

    return inlier_flag
|
|
446
|
+
|
|
447
|
+
|
|
448
|
+
def NVVC_angle_mzscore_filt(
    obs_cpx: np.array, vvc_thres: float = 0.1, angle_thres: int = 45, mz_thres: int = 3.5, axis: int = 2
) -> np.array:
    """
    Combine a median-angle filter and a modified z-score filter, selected per pixel by
    the Velocity Vector Coherence (VVC).

    Same logic as NVVC_angle_filt, but the fallback for incoherent pixels uses the
    MAD-based modified z-score (mz_score_filt) instead of the plain z-score.

    :param obs_cpx: cube data to filter (complex: real part is vx, imaginary part is vy)
    :param vvc_thres: VVC threshold selecting between the angle filter and the mzscore filter
    :param angle_thres: remove the observation if it is angle_thres (degrees) away from the median vector
    :param mz_thres: remove the observation if its absolute modified zscore is above this threshold (default is 3.5)
    :param axis: axis on which to perform the computation (time axis)
    :return: boolean mask, True for inliers
    """

    vx, vy = np.real(obs_cpx), np.imag(obs_cpx)
    # Per-pixel median components (variables named *_mean in the original code)
    vx_mean = np.nanmedian(vx, axis=axis, keepdims=True)
    vy_mean = np.nanmedian(vy, axis=axis, keepdims=True)
    mean_magnitude = np.hypot(vx_mean, vy_mean)  # norm of the median vector

    velo_magnitude = np.hypot(vx, vy)  # norm of each observation
    # Sum of unit direction vectors over the time axis
    x_component = np.nansum(vx / velo_magnitude, axis=axis)
    y_component = np.nansum(vy / velo_magnitude, axis=axis)

    nz = velo_magnitude.shape[axis]
    VVC = (
        np.hypot(x_component, y_component) / nz
    )  # velocity coherence as defined in Charrier, L., Yan, Y., Colin Koeniguer, E., Mouginot, J., Millan, R., & Trouvé, E. (2022). Fusion of multi-temporal and multi-sensor ice velocity observations.
    # ISPRS annals of the photogrammetry, remote sensing and spatial information sciences, 3, 311-318.
    VVC = np.expand_dims(VVC, axis=axis)

    vvc_cond = VVC > vvc_thres

    # cos(angle between observation and median vector) > cos(threshold) keeps the observation
    dot_product = vx_mean * vx + vy_mean * vy

    angle_filter = dot_product / (mean_magnitude * velo_magnitude) > np.cos(angle_thres * np.pi / 180)

    # Coherent pixels use the angle criterion; incoherent pixels fall back to the modified zscore
    inlier_flag = np.where(vvc_cond, angle_filter, mz_score_filt(velo_magnitude, mz_thres=mz_thres, axis=axis))

    return inlier_flag
|
|
488
|
+
|
|
489
|
+
|
|
490
|
+
def median_magnitude_filt(obs_cpx: np.array, median_magnitude_thres: int = 3, axis: int = 2):
    """
    Keep an observation only if its magnitude lies within a factor of
    median_magnitude_thres around the per-pixel median velocity magnitude, i.e. in
    ]median / thres, median * thres[.

    :param obs_cpx: [np array] --- Cube data to filter (complex where the real part is vx and the imaginary part is vy)
    :param median_magnitude_thres: [int] [default is 3] --- Position of the threshold relatively to the median velocity at pixel
    :param axis: [int] [default is 2] --- Axis on which the threshold should be applied (default is the time axis)

    :return inlier_flag: [np array] --- Boolean mask of the size of vx (and vy)
    """

    vv = np.abs(obs_cpx)  # velocity magnitude of each observation
    mean_magnitude = np.nanmedian(vv, axis=axis, keepdims=True)

    # Direct boolean expression instead of the redundant np.where(cond, True, False);
    # NaN magnitudes compare False on both sides and are therefore flagged as outliers.
    inlier_flag = (vv > mean_magnitude / median_magnitude_thres) & (vv < mean_magnitude * median_magnitude_thres)

    return inlier_flag
|
|
510
|
+
|
|
511
|
+
|
|
512
|
+
def median_angle_filt(obs_cpx: np.array, angle_thres: int = 45, axis: int = 2):
    """
    Remove the observation if it is angle_thres away from the median vector.

    For pixels whose median velocity magnitude is small, the flow direction is
    unreliable, so a z-score filter on the magnitude is used instead.

    :param obs_cpx: cube data to filter (complex: real part is vx, imaginary part is vy)
    :param angle_thres: threshold to remove observations, remove the observation if it is angle_thres away from the median vector
    :param axis: axis on which to perform the computation (time axis)
    :return: boolean mask, True for inliers
    """

    vx, vy = np.real(obs_cpx), np.imag(obs_cpx)

    # Per-pixel median components (variables named *_mean in the original code)
    vx_mean = np.nanmedian(vx, axis=axis, keepdims=True)
    vy_mean = np.nanmedian(vy, axis=axis, keepdims=True)

    mean_magnitude = np.hypot(vx_mean, vy_mean)  # norm of the median vector
    velo_magnitude = np.hypot(vx, vy)  # norm of each observation

    # cos(angle between observation and median vector) > cos(threshold) keeps the observation
    dot_product = vx_mean * vx + vy_mean * vy
    angle_filter = dot_product / (mean_magnitude * velo_magnitude) > np.cos(angle_thres * np.pi / 180)

    # NOTE(review): the magnitude threshold 10 (presumably m/y) and z_thres=3 are
    # hard-coded here, unlike in the other filters — confirm intended.
    bis_cond = mean_magnitude > 10
    inlier_flag = np.where(bis_cond, angle_filter, z_score_filt(velo_magnitude, z_thres=3, axis=axis))

    return inlier_flag
|
|
536
|
+
|
|
537
|
+
|
|
538
|
+
def flow_angle_filt(
    obs_cpx: xr.DataArray,
    direction: xr.DataArray,
    angle_thres: int = 45,
    z_thres: int = 3,
    axis: int = 2,
) -> da.array:
    """
    Remove the observations if they are angle_thres away from the given flow direction.

    Where the reference direction is NaN (e.g. stable areas), the angle criterion is
    disabled and a z-score filter on the velocity magnitude is applied instead.

    :param obs_cpx: cube data to filter (complex: real part is vx, imaginary part is vy)
    :param direction: given flow direction, in degrees
    :param angle_thres: remove an observation if its direction differs from the
        reference by more than this many degrees
    :param z_thres: zscore threshold for the magnitude fallback (default is 3)
    :param axis: axis on which to perform the zscore computation
    :return: boolean DataArray mask, True for inliers
    """
    vx, vy = np.real(obs_cpx), np.imag(obs_cpx)
    velo_magnitude = np.hypot(vx, vy)  # compute the norm of each observation

    # NOTE(review): arctan2(vx, vy) (x first) plus the +360 wrap suggests a compass
    # convention (angle measured from the +y axis, clockwise); confirm that
    # `direction` uses the same convention.
    angle_rad = np.arctan2(vx, vy)

    flow_direction = (np.rad2deg(angle_rad) + 360) % 360

    # Smallest absolute angular difference, wrapped into [0, 180]
    direction_diff = np.abs((flow_direction - direction + 180) % 360 - 180)

    angle_filter = direction_diff < angle_thres

    # Where no reference direction exists, keep the observation for the angle test
    # (the magnitude-based fallback below decides instead)
    angle_filter = angle_filter.where(
        ~np.isnan(direction), True
    )  # change the stable area to true in case of all invalid data
    mag_filter = np.where(~np.isnan(direction), True, z_score_filt(velo_magnitude, z_thres=z_thres, axis=axis))
    inlier_flag = np.logical_and(mag_filter, angle_filter.data)

    return xr.DataArray(inlier_flag, dims=obs_cpx.dims, coords=obs_cpx.coords)
|
|
579
|
+
|
|
580
|
+
|
|
581
|
+
def dask_filt_warpper(
    da_vx: xr.DataArray,
    da_vy: xr.DataArray,
    filt_method: str = "median_angle",
    vvc_thres: float = 0.3,
    angle_thres: int = 45,
    z_thres: int = 2,
    mz_thres=3.5,
    magnitude_thres: int = 1000,
    median_magnitude_thres=3,
    error_thres: int = 100,
    slope: xr.Dataset = None,
    aspect: xr.Dataset = None,
    direction: xr.Dataset = None,
    axis: int = 2,
):
    """
    Build and evaluate a boolean inlier mask over the observation cube using the
    requested filtering method. (Name kept for backward compatibility.)

    :param da_vx: vx observations
    :param da_vy: vy observations
    :param filt_method: filtering method ("median_angle", "vvc_angle",
        "vvc_angle_mzscore", "z_score", "mz_score", "magnitude", "median_magnitude",
        "error" or "flow_angle")
    :param vvc_thres: threshold used to combine zscore and median_angle filters
    :param angle_thres: remove an observation if it is angle_thres away from the median vector (or given direction)
    :param z_thres: remove an observation if its absolute zscore is higher than this threshold (default is 2)
    :param mz_thres: remove an observation if its absolute modified zscore is higher than this threshold (default is 3.5)
    :param magnitude_thres: remove an observation if its magnitude is higher than this threshold (default is 1000)
    :param median_magnitude_thres: relative threshold around the per-pixel median magnitude ("median_magnitude" method)
    :param error_thres: remove an observation if its error is higher than this threshold (default is 100)
    :param slope: unused here; kept for interface compatibility
    :param aspect: unused here; kept for interface compatibility
    :param direction: flow-direction dataset, required by the "flow_angle" method
    :param axis: axis on which to perform the computation (default is 2, the time axis)

    :return: computed boolean inlier mask
    """

    if filt_method == "median_angle":  # delete according to a threshold in angle between observations and median vector
        obs_arr = da_vx.data + 1j * da_vy.data
        inlier_mask = obs_arr.map_blocks(median_angle_filt, angle_thres=angle_thres, axis=axis, dtype=obs_arr.dtype)

    elif filt_method == "vvc_angle":  # combination between z_score and median_angle
        obs_arr = da_vx.data + 1j * da_vy.data
        inlier_mask = obs_arr.map_blocks(
            NVVC_angle_filt, vvc_thres=vvc_thres, angle_thres=angle_thres, axis=axis, dtype=obs_arr.dtype
        )

    elif filt_method == "vvc_angle_mzscore":  # combination between mz_score and median_angle
        obs_arr = da_vx.data + 1j * da_vy.data
        inlier_mask = obs_arr.map_blocks(
            NVVC_angle_mzscore_filt,
            vvc_thres=vvc_thres,
            angle_thres=angle_thres,
            mz_thres=mz_thres,
            axis=axis,
            dtype=obs_arr.dtype,
        )

    elif filt_method == "z_score":  # threshold according to the zscore
        inlier_mask_vx = da_vx.data.map_blocks(z_score_filt, z_thres=z_thres, axis=axis, dtype=da_vx.dtype)
        inlier_mask_vy = da_vy.data.map_blocks(z_score_filt, z_thres=z_thres, axis=axis, dtype=da_vy.dtype)
        inlier_mask = np.logical_and(inlier_mask_vx, inlier_mask_vy)

    elif filt_method == "mz_score":  # threshold according to the modified zscore
        inlier_mask_vx = da_vx.data.map_blocks(mz_score_filt, mz_thres=mz_thres, axis=axis, dtype=da_vx.dtype)
        inlier_mask_vy = da_vy.data.map_blocks(mz_score_filt, mz_thres=mz_thres, axis=axis, dtype=da_vy.dtype)
        inlier_mask = np.logical_and(inlier_mask_vx, inlier_mask_vy)

    elif filt_method == "magnitude":  # delete according to a threshold in magnitude
        obs_arr = np.hypot(da_vx.data, da_vy.data)
        inlier_mask = obs_arr.map_blocks(lambda x: x < magnitude_thres, dtype=obs_arr.dtype)

    elif (
        filt_method == "median_magnitude"
    ):  # the threshold in magnitude is computed relatively to the median of the data
        obs_arr = da_vx.data + 1j * da_vy.data
        inlier_mask = obs_arr.map_blocks(
            median_magnitude_filt, median_magnitude_thres=median_magnitude_thres, axis=axis, dtype=obs_arr.dtype
        )

    elif filt_method == "error":  # delete according to a threshold in error
        inlier_mask_vx = da_vx.data.map_blocks(lambda x: x < error_thres, dtype=da_vx.dtype)
        inlier_mask_vy = da_vy.data.map_blocks(lambda x: x < error_thres, dtype=da_vy.dtype)
        inlier_mask = np.logical_and(inlier_mask_vx, inlier_mask_vy)

    elif filt_method == "flow_angle":
        obs_arr = da_vx + 1j * da_vy
        # Broadcast the 2-D reference direction to the shape/chunks of the cube
        _, direction_expanded = xr.broadcast(obs_arr, direction["direction"])
        direction_expanded = direction_expanded.chunk(obs_arr.chunks)
        inlier_mask = xr.map_blocks(
            flow_angle_filt,
            obs_arr,
            args=(direction_expanded,),
            template=obs_arr,
            kwargs={"angle_thres": angle_thres, "z_thres": z_thres, "axis": axis},
        )
    else:
        # Bug fix: the message now lists the methods actually implemented above
        # (the original omitted 'vvc_angle_mzscore', 'mz_score' and 'flow_angle',
        # and advertised an unimplemented 'topo_angle').
        raise ValueError(
            "Filtering method should be either 'median_angle', 'vvc_angle', 'vvc_angle_mzscore', 'z_score', "
            "'mz_score', 'magnitude', 'median_magnitude', 'error' or 'flow_angle'."
        )

    return inlier_mask.compute()
|