pyadps 0.3.3b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyadps/Home_Page.py +42 -0
- pyadps/__init__.py +8 -0
- pyadps/__main__.py +15 -0
- pyadps/pages/01_Read_File.py +458 -0
- pyadps/pages/02_View_Raw_Data.py +164 -0
- pyadps/pages/03_Download_Raw_File.py +298 -0
- pyadps/pages/04_Sensor_Health.py +905 -0
- pyadps/pages/05_QC_Test.py +476 -0
- pyadps/pages/06_Profile_Test.py +970 -0
- pyadps/pages/07_Velocity_Test.py +600 -0
- pyadps/pages/08_Write_File.py +574 -0
- pyadps/pages/09_Auto_process.py +62 -0
- pyadps/pages/__init__.py +0 -0
- pyadps/utils/__init__.py +12 -0
- pyadps/utils/autoprocess.py +530 -0
- pyadps/utils/metadata/config.ini +99 -0
- pyadps/utils/metadata/demo.000 +0 -0
- pyadps/utils/metadata/flmeta.json +422 -0
- pyadps/utils/metadata/vlmeta.json +567 -0
- pyadps/utils/plotgen.py +728 -0
- pyadps/utils/profile_test.py +556 -0
- pyadps/utils/pyreadrdi.py +969 -0
- pyadps/utils/readrdi.py +1610 -0
- pyadps/utils/script.py +201 -0
- pyadps/utils/sensor_health.py +120 -0
- pyadps/utils/signal_quality.py +455 -0
- pyadps/utils/velocity_test.py +200 -0
- pyadps/utils/writenc.py +339 -0
- pyadps-0.3.3b0.dist-info/LICENSE +8 -0
- pyadps-0.3.3b0.dist-info/METADATA +172 -0
- pyadps-0.3.3b0.dist-info/RECORD +33 -0
- pyadps-0.3.3b0.dist-info/WHEEL +4 -0
- pyadps-0.3.3b0.dist-info/entry_points.txt +5 -0
pyadps/utils/signal_quality.py
@@ -0,0 +1,455 @@
import numpy as np
from pyadps.utils.plotgen import PlotNoise
from pyadps.utils.readrdi import ReadFile


def qc_check(var, mask, cutoff=0):
    """
    Perform a quality control check on the provided data and update the mask
    based on a cutoff threshold. Values in `var` that are less than the cutoff
    are marked as invalid in the mask.

    Parameters
    ----------
    var : numpy.ndarray
        The input array containing data to be checked against the cutoff.
    mask : numpy.ndarray
        An integer array of the same shape as `var`, where `1` indicates
        invalid data and `0` indicates valid data.
    cutoff : int, optional
        The threshold value for quality control. Any value in `var` less than
        this cutoff will be marked as invalid in the mask. Default is 0.

    Returns
    -------
    numpy.ndarray
        An updated integer mask array of the same shape as `var`, with `1`
        indicating invalid data and `0` indicating valid data.

    Notes
    -----
    - The function modifies the `mask` by applying the cutoff condition.
      Values in `var` that are less than the cutoff will be marked as
      invalid (`1`), while all other values will remain valid (`0`).
    - Ensure that `var` and `mask` are compatible in shape for element-wise
      operations.

    Example
    -------
    >>> import pyadps
    >>> ds = pyadps.ReadFile('dummy.000')
    >>> var = ds.echo.data
    >>> mask = pyadps.default_mask(ds)
    >>> mask = qc_check(var, mask, cutoff=40)
    """

    shape = np.shape(var)
    if len(shape) == 2:
        mask[var[:, :] < cutoff] = 1
    else:
        # Apply the check beam-wise for 3-D (beam, cell, ensemble) arrays.
        beam = shape[0]
        for i in range(beam):
            mask[var[i, :, :] < cutoff] = 1
    # values, counts = np.unique(mask, return_counts=True)
    # print(values, counts, np.round(counts[1] * 100 / np.sum(counts)))
    return mask


def correlation_check(ds, mask, cutoff=64):
    """
    Perform a correlation check on the provided dataset and update the
    mask to mark valid and invalid values based on a cutoff threshold.

    Parameters
    ----------
    ds : pyadps.dataset
        The input pyadps dataset containing correlation data to be checked.
        Accepts 2-D or 3-D masks.
    mask : numpy.ndarray
        An integer array of the same shape as `var`, where `1` indicates invalid
        data or masked data and `0` indicates valid data.
    cutoff : float, optional
        The threshold value for correlation. Any value in `ds.correlation.data`
        below this cutoff will be considered invalid and marked as `1` in the
        mask. Default is 64.

    Returns
    -------
    numpy.ndarray
        An updated integer mask array of the same shape as `var`, with `1`
        indicating invalid or masked data (within the cutoff limit) and `0`
        indicating valid data.

    Notes
    -----
    - The function modifies the `mask` based on the cutoff condition. Valid
      values in `var` retain their corresponding mask value as `0`, while
      invalid values or previously masked elements are marked as `1`.
    - Ensure that `ds.correlation.data` and `mask` are compatible in shape
      for element-wise operations.

    Example
    -------
    >>> import pyadps
    >>> ds = pyadps.ReadFile('dummy.000')
    >>> mask = pyadps.default_mask(ds)
    >>> outmask = correlation_check(ds, mask, cutoff=64)
    """
    correlation = ds.correlation.data
    mask = qc_check(correlation, mask, cutoff=cutoff)
    return mask


def echo_check(ds, mask, cutoff=40):
    """
    Perform an echo intensity check on the provided variable and update the
    mask to mark valid and invalid values based on a cutoff threshold.

    Parameters
    ----------
    ds : pyadps.dataset
        The input pyadps dataset containing echo intensity data to be checked.
        Accepts 2-D or 3-D masks.
    mask : numpy.ndarray
        An integer array of the same shape as `var`, where `1` indicates invalid
        data or masked data and `0` indicates valid data.
    cutoff : float, optional
        The threshold value for echo intensity. Any value in `ds.echo.data` below
        this cutoff will be considered invalid and marked as `1` in the mask.
        Default is 40.

    Returns
    -------
    numpy.ndarray
        An updated integer mask array of the same shape as `var`, with `1`
        indicating invalid or masked data (within the cutoff limit) and `0`
        indicating valid data.

    Notes
    -----
    - The function modifies the `mask` based on the cutoff condition. Valid
      values in `var` retain their corresponding mask value as `0`, while
      invalid values or previously masked elements are marked as `1`.
    - Ensure that `var` and `mask` are compatible in shape for element-wise
      operations.

    Example
    -------
    >>> import pyadps
    >>> ds = pyadps.ReadFile('dummy.000')
    >>> mask = pyadps.default_mask(ds)
    >>> outmask = echo_check(ds, mask, cutoff=40)
    """

    echo = ds.echo.data
    mask = qc_check(echo, mask, cutoff=cutoff)
    return mask


def ev_check(ds, mask, cutoff=9999):
    """
    Perform an error velocity check on the provided variable and update the
    mask to mark valid and invalid values based on a cutoff threshold.

    Parameters
    ----------
    ds : pyadps.dataset
        The input pyadps dataset containing error velocity data to be checked.
    mask : numpy.ndarray
        An integer array of the same shape as `var`, where `1` indicates invalid
        data or masked data and `0` indicates valid data.
    cutoff : float, optional
        The threshold value for error velocity. Any value in `var` whose
        magnitude meets or exceeds this cutoff will be considered invalid and
        marked as `1` in the mask. Default is 9999.

    Returns
    -------
    numpy.ndarray
        An updated integer mask array of the same shape as `var`, with `1`
        indicating invalid or masked data (within the cutoff limit) and `0`
        indicating valid data.

    Notes
    -----
    - The function modifies the `mask` based on the cutoff condition. Valid
      values in `var` retain their corresponding mask value as `0`, while
      invalid values or previously masked elements are marked as `1`.
    - Ensure that `var` and `mask` are compatible in shape for element-wise
      operations.

    Example
    -------
    >>> import pyadps
    >>> ds = pyadps.ReadFile('dummy.000')
    >>> mask = pyadps.default_mask(ds)
    >>> outmask = ev_check(ds, mask, cutoff=9999)
    """

    # Beam 4 of the velocity field holds the error velocity.
    var = ds.velocity.data[3, :, :]
    shape = np.shape(var)
    var = abs(var)
    if len(shape) == 2:
        # 32768 marks missing data, so exclude it from the cutoff test.
        mask[(var[:, :] >= cutoff) & (var[:, :] < 32768)] = 1
    else:
        beam = shape[0]
        for i in range(beam):
            mask[(var[i, :, :] >= cutoff) & (var[i, :, :] < 32768)] = 1
    return mask


def pg_check(ds, mask, cutoff=0, threebeam=True):
    """
    Perform a percent-good check on the provided data and update the mask
    to mark valid and invalid values based on a cutoff threshold.

    Parameters
    ----------
    ds : pyadps.dataset
        The input pyadps dataset containing percent-good data, where values
        range from 0 to 100 (maximum percent good).
    mask : numpy.ndarray
        An integer array of the same shape as `pgood`, where `1` indicates
        invalid data and `0` indicates valid data.
    cutoff : float, optional
        The threshold value for percent good. Any value in `pgood` greater than
        or equal to this cutoff will be considered valid (marked as `0`),
        while values below the cutoff are marked as invalid (`1`).
        Default is 0.
    threebeam : bool, optional
        If `True`, sums up Percent Good 1 and Percent Good 4 for the check.

    Returns
    -------
    numpy.ndarray
        An updated integer mask array of the same shape as `pgood`, with `1`
        indicating invalid data and `0` indicating valid data.

    Notes
    -----
    - The function modifies the `mask` based on the cutoff condition. Valid
      values in `pgood` are marked as `0`, while invalid values are marked
      as `1` in the mask.
    - Ensure that `pgood` and `mask` are compatible in shape for element-wise
      operations.
    - If `threebeam` is `True`, the logic may be adjusted to allow partial
      validity based on specific criteria.

    Example
    -------
    >>> import pyadps
    >>> ds = pyadps.ReadFile('dummy.000')
    >>> mask = pyadps.default_mask(ds)
    >>> outmask = pg_check(ds, mask, cutoff=50, threebeam=True)
    """

    pgood = ds.percentgood.data
    if threebeam:
        # Percent Good 1 (three-beam solutions) plus Percent Good 4
        # (four-beam solutions) gives the total percentage of good data.
        pgood1 = pgood[0, :, :] + pgood[3, :, :]
    else:
        # Percent Good 4 (four-beam solutions) only.
        pgood1 = pgood[3, :, :]

    mask[pgood1[:, :] < cutoff] = 1
    return mask


def false_target(ds, mask, cutoff=255, threebeam=True):
    """
    Apply a false target detection algorithm based on echo intensity values.
    This function identifies invalid or false targets in the data and updates
    the mask accordingly based on a specified cutoff threshold.

    Parameters
    ----------
    ds : pyadps.dataset
        The input pyadps dataset containing echo intensity values, which are
        used to detect false targets.
    mask : numpy.ndarray
        An integer array of the same shape as `echo`, where `1` indicates
        invalid or false target data and `0` indicates valid data.
    cutoff : int, optional
        The threshold for the spread in echo intensity across beams. For each
        cell, if the difference between the highest and lowest retained beam
        intensities exceeds this cutoff, the cell is flagged as a false target
        (`1` in the mask). Default is 255.
    threebeam : bool, optional
        If `True`, applies a relaxed check that ignores the weakest beam, so
        data remain valid when only three beams agree. Default is `True`.

    Returns
    -------
    numpy.ndarray
        An updated integer mask array of the same shape as `echo`, with `1`
        indicating false target or invalid data and `0` indicating valid data.

    Notes
    -----
    - The function modifies the `mask` by applying the cutoff condition.
      Cells whose across-beam echo intensity spread exceeds the cutoff are
      marked as false targets (`1`), while cells at or below the cutoff are
      considered valid (`0`).
    - If `threebeam` is `True`, a more lenient check is applied to handle
      data with fewer valid beams.
    - Ensure that `echo` and `mask` are compatible in shape for element-wise
      operations.

    Example
    -------
    >>> import pyadps
    >>> ds = pyadps.ReadFile('dummy.000')
    >>> mask = pyadps.default_mask(ds)
    >>> mask = false_target(ds, mask, cutoff=255)
    """

    echo = ds.echo.data

    shape = np.shape(echo)
    for i in range(shape[1]):
        for j in range(shape[2]):
            # Sort the beam intensities for this cell and ensemble.
            x = np.sort(echo[:, i, j])
            if threebeam:
                # Ignore the weakest beam: compare the maximum against the
                # second-smallest intensity.
                if x[-1] - x[1] > cutoff:
                    mask[i, j] = 1
            else:
                if x[-1] - x[0] > cutoff:
                    mask[i, j] = 1

    # values, counts = np.unique(mask, return_counts=True)
    # print(values, counts, np.round(counts[1] * 100 / np.sum(counts)))
    return mask


def default_mask(ds):
    """
    Create a default 2-D mask file based on the velocity data.
    This function generates a mask where values are marked as valid or invalid
    based on the missing values from the velocity data.

    Parameters
    ----------
    ds : pyadps.dataset or numpy.ndarray
        A pyadps dataset is used to extract velocity and dimensions for the
        mask. If a 3-D numpy.ndarray is given, the beam, cell, and ensemble
        dimensions are taken from its shape.

    Returns
    -------
    numpy.ndarray
        A 2-D mask array of shape (cells, ensembles), where `1` indicates
        invalid data and `0` indicates valid data.

    Notes
    -----
    - The function uses the velocity data along with the information from the
      Fixed Leader object to determine which values are valid and which are
      invalid.

    Example
    -------
    >>> import pyadps
    >>> ds = pyadps.ReadFile('demo.000')
    >>> mask = pyadps.default_mask(ds)
    """
    if isinstance(ds, ReadFile) or ds.__class__.__name__ == "ReadFile":
        flobj = ds.fixedleader
        velocity = ds.velocity.data
        cells = flobj.field()["Cells"]
        beams = flobj.field()["Beams"]
        ensembles = flobj.ensembles
    elif isinstance(ds, np.ndarray) and ds.ndim == 3:
        velocity = ds
        beams = ds.shape[0]
        cells = ds.shape[1]
        ensembles = ds.shape[2]
    else:
        raise ValueError("Input must be a 3-D numpy array or a PyADPS instance")

    mask = np.zeros((cells, ensembles))
    # Ignore mask for error velocity (the last beam).
    for i in range(beams - 1):
        # -32768 marks missing velocity data.
        mask[velocity[i, :, :] < -32767] = 1
    return mask


def qc_prompt(ds, name, data=None):
    """
    Prompt the user to confirm or adjust the quality control threshold for a
    specific parameter based on predefined ranges. The function provides an
    interactive interface for the user to adjust thresholds for various quality
    control criteria, with options for certain thresholds like "Echo Intensity
    Thresh" to check the noise floor.

    Parameters
    ----------
    ds : pyadps.dataset
        The input pyadps dataset. Its Fixed Leader metadata is used to retrieve
        the current threshold values based on the provided parameter name.
    name : str
        The name of the parameter for which the threshold is being adjusted.
        Examples include "Echo Intensity Thresh", "Correlation Thresh",
        "Percent Good Min", etc.
    data : numpy.ndarray, optional
        The data associated with the threshold. This is required for parameters
        like "Echo Intensity Thresh" where a noise floor check might be
        performed. Default is None.

    Returns
    -------
    int
        The updated threshold value, either the default or the new value
        entered by the user.

    Notes
    -----
    - The function will prompt the user to change the threshold for the given
      `name` parameter.
    - For certain parameters, the user may be asked if they would like to check
      the noise floor (for example, for "Echo Intensity Thresh"). This triggers
      the display of a plot and lets the user select a new threshold.
    - The function ensures that the new threshold is within the acceptable
      range for each parameter.
    - The default thresholds are used if the user chooses not to change them.

    Example
    -------
    >>> import pyadps
    >>> ds = pyadps.ReadFile('demo.000')
    >>> name = "Echo Intensity Thresh"
    >>> threshold = qc_prompt(ds, name, data)
    The default threshold for echo intensity thresh is 0
    Would you like to change the threshold [y/n]: y
    Would you like to check the noise floor [y/n]: y
    Threshold changed to 50
    """

    flobj = ds.fixedleader
    cutoff = 0
    if name == "Echo Intensity Thresh":
        cutoff = 0
    else:
        cutoff = flobj.field()[name]

    if name in ["Echo Thresh", "Correlation Thresh", "False Target Thresh"]:
        var_range = [0, 255]
    elif name == "Percent Good Min":
        var_range = [0, 100]
    elif name == "Error Velocity Thresh":
        var_range = [0, 5000]
    else:
        var_range = [0, 255]

    print(f"The default threshold for {name.lower()} is {cutoff}")
    affirm = input("Would you like to change the threshold [y/n]: ")
    if affirm.lower() == "y":
        while True:
            if name == "Echo Intensity Thresh":
                affirm2 = input("Would you like to check the noise floor [y/n]: ")
                if affirm2.lower() == "y":
                    # Show the noise plot and let the user pick a cutoff.
                    p = PlotNoise(data)
                    p.show()
                    cutoff = p.cutoff
                else:
                    cutoff = input(
                        f"Enter new {name} [{var_range[0]}-{var_range[1]}]: "
                    )
            else:
                cutoff = input(f"Enter new {name} [{var_range[0]}-{var_range[1]}]: ")

            try:
                # Convert inside the try block so a non-numeric entry is
                # caught as a ValueError instead of crashing the prompt.
                cutoff = int(cutoff)
                if var_range[0] <= cutoff <= var_range[1]:
                    break
                else:
                    print(f"Enter an integer between {var_range[0]} and {var_range[1]}")
            except ValueError:
                print("Enter a valid number")

        print(f"Threshold changed to {cutoff}")

    else:
        print(f"Default threshold {cutoff} used.")
    return cutoff
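
Taken together, these checks form a masking pipeline: start from the fill-value mask and tighten it with each test. A minimal usage sketch (illustrative only; it assumes the bundled demo file pyadps/utils/metadata/demo.000 and that the functions are importable from the module path shown in the file list above):

    import pyadps
    from pyadps.utils import signal_quality as sq

    ds = pyadps.ReadFile('demo.000')
    mask = sq.default_mask(ds)                          # mask missing velocities
    mask = sq.correlation_check(ds, mask, cutoff=64)    # low beam correlation
    mask = sq.echo_check(ds, mask, cutoff=40)           # weak echo intensity
    mask = sq.ev_check(ds, mask, cutoff=2000)           # large error velocity
    mask = sq.pg_check(ds, mask, cutoff=50)             # low percent good
    mask = sq.false_target(ds, mask, cutoff=255)        # false targets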
pyadps/utils/velocity_test.py
@@ -0,0 +1,200 @@
from itertools import groupby

import requests
import numpy as np
import scipy as sp
import scipy.signal  # explicit import so sp.signal is available on older SciPy versions
from pygeomag import GeoMag


def magdec(glat, glon, alt, time):
    """
    Calculate the magnetic declination at a given location and time using
    pygeomag, selecting the WMM coefficient (.COF) file that matches the
    five-year epoch of the given year.

    Args:
        glat (float): Latitude in decimal degrees
        glon (float): Longitude in decimal degrees
        alt (float): Altitude, passed through to pygeomag
        time (float): Decimal year

    Returns:
        Magnetic declination in degrees, wrapped as [[d]].
    """
    # Select the COF file according to the given year.
    if 2010 <= time < 2030:
        var = 2010 + (int(time) - 2010) // 5 * 5
        file_name = "wmm/WMM_{}.COF".format(str(var))
        geo_mag = GeoMag(coefficients_file=file_name)
    else:
        geo_mag = GeoMag("wmm/WMM_2025.COF")
    result = geo_mag.calculate(glat=glat, glon=glon, alt=alt, time=time)

    return [[result.d]]
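

# Illustrative use of magdec (the coordinates are hypothetical and the
# wmm/*.COF files are assumed to be resolvable from the working directory):
#     dec = magdec(glat=12.0, glon=74.0, alt=0.0, time=2020.5)
#     declination_deg = dec[0][0]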


def wmm2020api(lat1, lon1, year):
    """
    This function uses the NOAA geomagnetic calculator API to retrieve the
    magnetic declination at a given location. The API needs latitude,
    longitude, and year to perform the calculation. The key in the function
    must be updated from time to time, since the API is subject to periodic
    updates and the key may change.

    Args:
        lat1 (float): Latitude in decimal degrees
        lon1 (float): Longitude in decimal degrees
        year (int): Start year for the calculation

    Returns:
        mag: magnetic declination at the given location in degrees,
        wrapped as [[d]].
    """
    baseurl_wmm = (
        "https://www.ngdc.noaa.gov/geomag-web/calculators/calculateDeclination?"
    )
    baseurl_igrf = (
        "https://www.ngdc.noaa.gov/geomag-web/calculators/calculateDeclination?"
    )
    baseurl_emm = "https://emmcalc.geomag.info/?magneticcomponent=d&"
    key = "zNEw7"
    resultFormat = "json"
    # Pick the model (and endpoint) that covers the requested year.
    if year >= 2025:
        baseurl = baseurl_wmm
        model = "WMM"
    elif year >= 2019:
        baseurl = baseurl_wmm
        model = "IGRF"
    elif year >= 2000:
        baseurl = baseurl_emm
        model = "EMM"
    elif year >= 1590:
        baseurl = baseurl_igrf
        model = "IGRF"
    else:
        raise ValueError("Year must be 1590 or later")
    url = "{}model={}&lat1={}&lon1={}&key={}&startYear={}&resultFormat={}".format(
        baseurl, model, lat1, lon1, key, year, resultFormat
    )
    response = requests.get(url)
    data = response.json()
    results = data["result"][0]
    mag = [[results["declination"]]]

    return mag


# The magnetic_declination function below is commented out since the method
# is no longer used.
# def magnetic_declination(lat, lon, depth, year):
#     """
#     The function calculates the magnetic declination at a given location
#     and depth, using a local installation of the wmm2020 model.
#
#     Args:
#         lat (parameter, float): Latitude in decimals
#         lon (parameter, float): Longitude in decimals
#         depth (parameter, float): depth in m
#         year (parameter, integer): Year
#
#     Returns:
#         mag: Magnetic declination (degrees)
#     """
#     import wmm2020
#     mag = wmm2020.wmm(lat, lon, depth, year)
#     mag = mag.decl.data
#
#     return mag


def velocity_modifier(velocity, mag):
    """
    The function uses magnetic declination from wmm2020 to correct
    the horizontal velocities.

    Args:
        velocity (numpy array): velocity array
        mag: magnetic declination (degrees)

    Returns:
        velocity (numpy array): Rotated velocity using magnetic declination
    """
    mag = np.deg2rad(mag[0][0])
    velocity = np.where(velocity == -32768, np.nan, velocity)
    # Keep copies of the original u and v components so both rotation
    # formulas operate on the unrotated values.
    u = velocity[0, :, :].copy()
    v = velocity[1, :, :].copy()
    velocity[0, :, :] = u * np.cos(mag) + v * np.sin(mag)
    velocity[1, :, :] = -1 * u * np.sin(mag) + v * np.cos(mag)
    # np.nan never compares equal to itself, so use np.isnan to restore
    # the fill value.
    velocity = np.where(np.isnan(velocity), -32768, velocity)

    return velocity
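

# The correction above is the standard 2-D rotation by the declination d:
#     u' =  u * cos(d) + v * sin(d)
#     v' = -u * sin(d) + v * cos(d)
# applied to the horizontal velocity components u and v.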


def velocity_cutoff(velocity, mask, cutoff=250):
    """
    Masks all velocities above a cutoff. Note that
    velocity is a 2-D array.

    Args:
        velocity (numpy array, integer): Velocity(depth, time) in mm/s
        mask (numpy array, integer): Mask file
        cutoff (parameter, integer): Cutoff in cm/s

    Returns:
        mask
    """
    # Convert the cutoff to mm/s
    cutoff = cutoff * 10
    mask[np.abs(velocity) > cutoff] = 1
    return mask


def despike(velocity, mask, kernal_size=13, cutoff=3):
    """
    Function to remove anomalous spikes in the data over a period of time.
    A median filter is used to despike the data.

    Args:
        velocity (numpy array, integer): Velocity(depth, time) in mm/s
        mask (numpy array, integer): Mask file
        kernal_size (parameter, integer): Window size for the rolling median filter
        cutoff (parameter, integer): Number of standard deviations used to identify spikes

    Returns:
        mask
    """
    velocity = np.where(velocity == -32768, np.nan, velocity)
    shape = np.shape(velocity)
    for j in range(shape[0]):
        # Apply median filter
        filt = sp.signal.medfilt(velocity[j, :], kernal_size)
        # Calculate absolute deviation from the rolling median
        diff = np.abs(velocity[j, :] - filt)
        # Calculate threshold for spikes based on standard deviation
        std_dev = np.nanstd(diff)
        spike_threshold = cutoff * std_dev
        # Apply mask after identifying spikes
        mask[j, :] = np.where(diff < spike_threshold, mask[j, :], 1)
    return mask
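

# Spike criterion used by despike, per depth cell j:
#     flag where |v_j - medfilt(v_j, kernal_size)| >= cutoff * nanstd(deviation)
# i.e. a sample is masked when it departs from the rolling median by at
# least `cutoff` standard deviations of the deviations.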


def flatline(
    velocity,
    mask,
    kernal_size=4,
    cutoff=1,
):
    """
    Function to check and remove velocities that are constant over a
    period of time.

    Args:
        velocity (numpy array, integer): Velocity (depth, time)
        mask (numpy array, integer): Mask file
        kernal_size (parameter, integer): No. of ensembles over which a flatline has to be detected
        cutoff (parameter, integer): Permitted deviation in velocity

    Returns:
        mask
    """
    index = 0
    velocity = np.where(velocity == -32768, np.nan, velocity)
    shape = np.shape(velocity)
    dummymask = np.zeros(shape[1])
    for j in range(shape[0]):
        # Flag ensembles whose change from the previous ensemble is within
        # the permitted deviation.
        diff = np.diff(velocity[j, :])
        diff = np.insert(diff, 0, 0)
        dummymask[np.abs(diff) <= cutoff] = 1
        # Mask any run of flagged ensembles at least kernal_size long.
        for k, g in groupby(dummymask):
            # subset_size = sum(1 for i in g)
            subset_size = len(list(g))
            if k == 1 and subset_size >= kernal_size:
                mask[j, index : index + subset_size] = 1
            index = index + subset_size
        dummymask = np.zeros(shape[1])
        index = 0

    return mask
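
As with the signal-quality checks, these velocity tests chain on a shared 2-D mask. A short sketch (illustrative only; the synthetic array stands in for a real (beam, cell, ensemble) velocity field in mm/s, the coordinates are hypothetical, and the module path follows the file list above):

    import numpy as np
    from pyadps.utils import velocity_test as vt

    # Synthetic velocity field: 4 beams, 30 cells, 1000 ensembles, in mm/s
    velocity = np.random.randint(-500, 500, size=(4, 30, 1000)).astype(float)
    mask = np.zeros((30, 1000), dtype=int)

    mag = vt.magdec(glat=12.0, glon=74.0, alt=0.0, time=2020.5)
    velocity = vt.velocity_modifier(velocity, mag)      # declination correction

    u = velocity[0, :, :]                               # one horizontal component
    mask = vt.velocity_cutoff(u, mask, cutoff=250)      # cutoff given in cm/s
    mask = vt.despike(u, mask, kernal_size=13, cutoff=3)
    mask = vt.flatline(u, mask, kernal_size=4, cutoff=1)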