mt-metadata 0.3.4__py2.py3-none-any.whl → 0.3.6__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mt-metadata has been flagged as potentially problematic by the registry.
- mt_metadata/__init__.py +1 -1
- mt_metadata/base/metadata.py +8 -8
- mt_metadata/timeseries/filters/frequency_response_table_filter.py +10 -7
- mt_metadata/timeseries/stationxml/xml_channel_mt_channel.py +53 -1
- mt_metadata/timeseries/stationxml/xml_inventory_mt_experiment.py +4 -1
- mt_metadata/timeseries/tools/from_many_mt_files.py +15 -3
- mt_metadata/transfer_functions/core.py +96 -71
- mt_metadata/transfer_functions/io/edi/edi.py +7 -7
- mt_metadata/transfer_functions/io/emtfxml/emtfxml.py +4 -4
- mt_metadata/transfer_functions/processing/aurora/__init__.py +0 -1
- mt_metadata/transfer_functions/processing/aurora/band.py +23 -11
- mt_metadata/transfer_functions/processing/aurora/channel_nomenclature.py +4 -0
- mt_metadata/transfer_functions/processing/fourier_coefficients/standards/decimation.json +1 -1
- {mt_metadata-0.3.4.dist-info → mt_metadata-0.3.6.dist-info}/METADATA +3 -3
- {mt_metadata-0.3.4.dist-info → mt_metadata-0.3.6.dist-info}/RECORD +19 -20
- {mt_metadata-0.3.4.dist-info → mt_metadata-0.3.6.dist-info}/WHEEL +1 -1
- mt_metadata/timeseries/filters/channel_response_filter.py +0 -476
- {mt_metadata-0.3.4.dist-info → mt_metadata-0.3.6.dist-info}/AUTHORS.rst +0 -0
- {mt_metadata-0.3.4.dist-info → mt_metadata-0.3.6.dist-info}/LICENSE +0 -0
- {mt_metadata-0.3.4.dist-info → mt_metadata-0.3.6.dist-info}/top_level.txt +0 -0
mt_metadata/__init__.py
CHANGED

@@ -39,7 +39,7 @@ you should only have to changes these dictionaries.

__author__ = """Jared Peacock"""
__email__ = "jpeacock@usgs.gov"
-__version__ = "0.3.4"
+__version__ = "0.3.6"

# =============================================================================
# Imports
mt_metadata/base/metadata.py
CHANGED

@@ -300,7 +300,7 @@ class Base:
            self.logger.exception(error)
            raise MTSchemaError(error)

-    def _validate_option(self, name, option_list):
+    def _validate_option(self, name, value, option_list):
        """
        validate the given attribute name agains possible options and check
        for aliases

@@ -315,21 +315,21 @@ class Base:
        :rtype: TYPE

        """
-        if
+        if value is None:
            return True, False, None
        options = [ss.lower() for ss in option_list]
        other_possible = False
        if "other" in options:
            other_possible = True
-        if
+        if value.lower() in options:
            return True, other_possible, None
-        elif
+        elif value.lower() not in options and other_possible:
            msg = (
-                "{
-                + " are allowed. Allowing {
+                f"Value '{value}' not found for metadata field '{name}' in options list {option_list}, but other options"
+                + f" are allowed. Allowing {option_list} to be set to {value}."
            )
            return True, other_possible, msg
-        return False, other_possible, "{
+        return False, other_possible, f"Value '{value}' for metadata field '{name}' not found in options list {option_list}"

    def __setattr__(self, name, value):
        """

@@ -399,7 +399,7 @@ class Base:
            # check options
            if v_dict["style"] == "controlled vocabulary":
                options = v_dict["options"]
-                accept, other, msg = self._validate_option(value, options)
+                accept, other, msg = self._validate_option(name, value, options)
                if not accept:
                    self.logger.error(msg.format(value, options))
                    raise MTSchemaError(msg.format(value, options))
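The refactored _validate_option now receives both the field name and the candidate value, so the error message can say which metadata field failed validation. The snippet below is a minimal standalone sketch of the same controlled-vocabulary check; validate_option and the example field values are illustrative, not the package's own API.

    # Minimal sketch of the controlled-vocabulary check; mirrors the new
    # (name, value, option_list) signature and (accept, other, msg) return shape.
    def validate_option(name, value, option_list):
        if value is None:
            return True, False, None
        options = [opt.lower() for opt in option_list]
        other_allowed = "other" in options
        if value.lower() in options:
            return True, other_allowed, None
        if other_allowed:
            msg = (
                f"Value '{value}' not found for metadata field '{name}' in options "
                f"list {option_list}, but other options are allowed."
            )
            return True, other_allowed, msg
        return False, other_allowed, (
            f"Value '{value}' for metadata field '{name}' not found in options list {option_list}"
        )

    accepted, _, message = validate_option("type", "tidal", ["electric", "magnetic", "other"])
    print(accepted, message)  # True, plus a message explaining that 'other' is allowed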
mt_metadata/timeseries/filters/frequency_response_table_filter.py
CHANGED

@@ -30,7 +30,6 @@ attr_dict.add_dict(
# =============================================================================


-
class FrequencyResponseTableFilter(FilterBase):
    """
    Phases should be in radians.

@@ -130,7 +129,6 @@ class FrequencyResponseTableFilter(FilterBase):
        self._empirical_phases = np.array(value, dtype=float)

        if self._empirical_phases.size > 0:
-
            if self._empirical_phases.mean() > 1000 * np.pi / 2:
                self.logger.warning(
                    "Phases appear to be in milli radians attempting to convert to radians"

@@ -216,12 +214,17 @@ class FrequencyResponseTableFilter(FilterBase):
        :rtype: np.ndarray

        """
-        if (
-
-
-
+        if np.min(frequencies) < self.min_frequency:
+            # if there is a dc component skip it.
+            if np.min(frequencies) != 0:
+                self.logger.warning(
+                    f"Extrapolating frequencies smaller ({np.min(frequencies)} Hz) "
+                    f"than table frequencies ({self.min_frequency} Hz)."
+                )
+        if np.max(frequencies) > self.max_frequency:
            self.logger.warning(
-                "Extrapolating
+                f"Extrapolating frequencies larger ({np.max(frequencies)} Hz) "
+                f"than table frequencies ({self.max_frequency} Hz)."
            )

        phase_response = interp1d(
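The new bounds check warns before the table interpolation extrapolates outside the tabulated frequency range, and skips the warning when the only out-of-range point is a DC (0 Hz) component. A hedged sketch of that pattern; the table values are made up and plain prints stand in for the filter's logger and min_frequency/max_frequency attributes.

    # Hedged sketch of the out-of-range warning around the table interpolation.
    import numpy as np
    from scipy.interpolate import interp1d

    table_frequencies = np.array([0.001, 0.01, 0.1, 1.0, 10.0])
    table_phases = np.linspace(-np.pi / 2, 0.0, table_frequencies.size)
    min_frequency, max_frequency = table_frequencies.min(), table_frequencies.max()

    frequencies = np.array([0.0, 0.0005, 0.5, 50.0])
    if np.min(frequencies) < min_frequency and np.min(frequencies) != 0:
        # a DC component (0 Hz) is skipped; anything else below range triggers the warning
        print(f"Extrapolating frequencies smaller ({np.min(frequencies)} Hz) "
              f"than table frequencies ({min_frequency} Hz).")
    if np.max(frequencies) > max_frequency:
        print(f"Extrapolating frequencies larger ({np.max(frequencies)} Hz) "
              f"than table frequencies ({max_frequency} Hz).")

    phase_response = interp1d(table_frequencies, table_phases, fill_value="extrapolate")
    print(phase_response(frequencies))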
mt_metadata/timeseries/stationxml/xml_channel_mt_channel.py
CHANGED

@@ -8,6 +8,8 @@ Created on Fri Feb 19 16:14:41 2021
:license: MIT

"""
+import copy
+
# =============================================================================
# Imports
# =============================================================================

@@ -24,6 +26,7 @@ from mt_metadata.timeseries.stationxml.utils import BaseTranslator
from mt_metadata.utils.units import get_unit_object

from obspy.core import inventory
+from obspy import UTCDateTime

# =============================================================================

@@ -33,6 +36,15 @@ class XMLChannelMTChannel(BaseTranslator):
    translate back and forth between StationXML Channel and MT Channel
    """

+    understood_sensor_types = [
+        "logger",
+        "magnetometer",
+        "induction coil",
+        "coil",
+        "dipole",
+        "electrode"
+    ]
+
    def __init__(self):
        super().__init__()

@@ -109,7 +121,8 @@ class XMLChannelMTChannel(BaseTranslator):
        # fill channel filters
        mt_channel.filter.name = list(mt_filters.keys())
        mt_channel.filter.applied = [True] * len(list(mt_filters.keys()))
-
+        if UTCDateTime(mt_channel.time_period.end) < UTCDateTime(mt_channel.time_period.start):
+            mt_channel.time_period.end = '2200-01-01T00:00:00+00:00'
        return mt_channel, mt_filters

    def mt_to_xml(self, mt_channel, filters_dict, hard_code=True):

@@ -217,6 +230,8 @@ class XMLChannelMTChannel(BaseTranslator):
        :rtype: TYPE

        """
+        sensor.type = self._deduce_sensor_type(sensor)
+
        if not sensor.type:
            return mt_channel

@@ -566,3 +581,40 @@ class XMLChannelMTChannel(BaseTranslator):
        xml_channel.calibration_units_description = unit_obj.name

        return xml_channel
+
+
+    def _deduce_sensor_type(self, sensor):
+        """
+
+        :param sensor: Information about a sensor, usually extractes from FDSN XML
+        :type sensor: obspy.core.inventory.util.Equipment
+
+        :return:
+        """
+        original_sensor_type = sensor.type
+        # set sensor_type to be a string if it is None
+        if original_sensor_type is None:
+            sensor_type = ""  # make a string
+            msg = f"Sensor {sensor} does not have field type attr"
+            self.logger.debug(msg)
+        else:
+            sensor_type = copy.deepcopy(original_sensor_type)
+
+        if sensor_type.lower() in self.understood_sensor_types:
+            return sensor_type
+        else:
+            self.logger.warning(f" sensor {sensor} type {sensor.type} not in {self.understood_sensor_types}")
+
+        # Try handling Bartington FGM at Earthscope ... this is a place holder for handling non-standard cases
+        if sensor.description == "Bartington 3-Axis Fluxgate Sensor":
+            sensor_type = "magnetometer"
+        elif sensor_type.lower() == "bartington":
+            sensor_type = "magnetometer"
+
+        # reset sensor_type to None it it was not handled
+        if not sensor_type:
+            sensor_type = original_sensor_type
+            self.logger.error("sensor type could not be resolved")
+
+        return sensor_type
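Two behavioural changes here: the sensor type is now deduced against a list of understood types (with a Bartington fluxgate special case), and a channel whose end time precedes its start time is given a far-future end. A small hedged sketch of the end-time guard; the dates are made up and the placeholder matches the one in the diff.

    # Hedged sketch of the end-before-start guard on a channel's time period.
    from obspy import UTCDateTime

    start = "2020-06-01T00:00:00+00:00"
    end = "1980-01-01T00:00:00+00:00"  # bad metadata: end precedes start

    if UTCDateTime(end) < UTCDateTime(start):
        # push the end to the same far-future placeholder used in the diff
        end = "2200-01-01T00:00:00+00:00"
    print(start, end)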
mt_metadata/timeseries/stationxml/xml_inventory_mt_experiment.py
CHANGED

@@ -170,7 +170,10 @@ class XMLInventoryMTExperiment:
        xml_network = self.network_translator.mt_to_xml(mt_survey)
        for mt_station in mt_survey.stations:
            xml_station = self.station_translator.mt_to_xml(mt_station)
-
+            if mt_survey.country is not None:
+                xml_station.site.country = ",".join(
+                    [str(country) for country in mt_survey.country]
+                )
            for mt_run in mt_station.runs:
                xml_station = self.add_run(
                    xml_station, mt_run, mt_survey.filters
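Survey country is held as a list on the MT side, while a StationXML Site takes a single string, hence the join. A trivial hedged sketch with made-up values:

    # Hedged sketch: a list of countries flattened for StationXML Site.country.
    country = ["USA", "Canada"]  # illustrative
    if country is not None:
        site_country = ",".join(str(c) for c in country)
        print(site_country)  # USA,Canada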
mt_metadata/timeseries/tools/from_many_mt_files.py
CHANGED

@@ -12,7 +12,14 @@ from pathlib import Path
import pandas as pd
from xml.etree import cElementTree as et

-from mt_metadata.timeseries import
+from mt_metadata.timeseries import (
+    Experiment,
+    Survey,
+    Station,
+    Run,
+    Electric,
+    Magnetic,
+)

from mt_metadata.timeseries.filters import (
    PoleZeroFilter,

@@ -22,6 +29,7 @@ from mt_metadata.timeseries.filters import (
)
from mt_metadata.timeseries.stationxml import XMLInventoryMTExperiment

+
# =============================================================================
# Useful Class
# =============================================================================

@@ -182,7 +190,7 @@ class MT2StationXML(XMLInventoryMTExperiment):
        channels_list = []
        for ch in order:
            for fn in rdf:
-                if ch in fn.name.lower():
+                if ch in fn.name[len(station) :].lower():
                    channels_list.append(fn)
                    break

@@ -196,7 +204,11 @@ class MT2StationXML(XMLInventoryMTExperiment):
        :rtype: TYPE

        """
-        fn_dict = {
+        fn_dict = {
+            "survey": self.survey,
+            "filters": self.filters,
+            "stations": [],
+        }
        if stations in [None, []]:
            station_iterator = self.stations
        else:
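The channel-ordering fix matches a channel code only against the part of the file name after the station prefix, so a station name that happens to contain a code (for example "ex" inside "mex01") no longer matches every file. A hedged sketch with made-up station and file names:

    # Hedged sketch of the channel-matching fix; names are illustrative.
    from pathlib import Path

    station = "mex01"
    order = ["hx", "hy", "hz", "ex", "ey"]
    rdf = [Path(f"mex01{ch}.xml") for ch in ["ey", "ex", "hz", "hy", "hx"]]

    channels_list = []
    for ch in order:
        for fn in rdf:
            # only look past the station prefix; "ex" in "mex01..." would match everything
            if ch in fn.name[len(station):].lower():
                channels_list.append(fn)
                break
    print([fn.name for fn in channels_list])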
mt_metadata/transfer_functions/core.py
CHANGED

@@ -838,10 +838,12 @@ class TF:
            ].data.tolist()
            if self.ex in outputs or self.ey in outputs or self.hz in outputs:
                if np.all(
-                    self._transfer_function.transfer_function.
-
-
-
+                    self._transfer_function.transfer_function.loc[
+                        dict(
+                            input=self._ch_input_dict["tf"],
+                            output=self._ch_output_dict["tf"],
+                        )
+                    ].data
                    == 0
                ):
                    return False
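The TF hunks in this file all swap in the same xarray pattern: label-based selection with .loc[dict(input=..., output=...)]. A hedged sketch of how that selection behaves on a small DataArray; the coordinate names mirror the diff, the data are made up.

    # Hedged sketch of xarray label selection as used throughout TF.
    import numpy as np
    import xarray as xr

    tf = xr.DataArray(
        np.arange(12.0).reshape(3, 2, 2),
        dims=("period", "output", "input"),
        coords={"period": [1.0, 10.0, 100.0], "output": ["ex", "ey"], "input": ["hx", "hy"]},
    )

    subset = tf.loc[dict(input=["hx", "hy"], output=["ex"])]
    print(subset.shape)                    # (3, 1, 2)
    print(bool(np.all(subset.data == 0)))  # the emptiness test used by the has_* checks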
@@ -857,9 +859,9 @@ class TF:

        """
        if self.has_transfer_function():
-            ds = self.dataset.transfer_function.
-                input=self.hx_hy, output=self.ex_ey_hz
-
+            ds = self.dataset.transfer_function.loc[
+                dict(input=self.hx_hy, output=self.ex_ey_hz)
+            ]
            for key, mkey in self._dataset_attr_dict.items():
                obj, attr = mkey.split(".", 1)
                value = getattr(self, obj).get_attr_from_name(attr)

@@ -889,9 +891,9 @@ class TF:

        """
        if self.has_transfer_function():
-            ds = self.dataset.transfer_function_error.
-                input=self.hx_hy, output=self.ex_ey_hz
-
+            ds = self.dataset.transfer_function_error.loc[
+                dict(input=self.hx_hy, output=self.ex_ey_hz)
+            ]
            for key, mkey in self._dataset_attr_dict.items():
                obj, attr = mkey.split(".", 1)
                value = getattr(self, obj).get_attr_from_name(attr)

@@ -921,9 +923,9 @@ class TF:

        """
        if self.has_transfer_function():
-            ds = self.dataset.transfer_function_model_error.
-                input=self.hx_hy, output=self.ex_ey_hz
-
+            ds = self.dataset.transfer_function_model_error.loc[
+                dict(input=self.hx_hy, output=self.ex_ey_hz)
+            ]
            for key, mkey in self._dataset_attr_dict.items():
                obj, attr = mkey.split(".", 1)
                value = getattr(self, obj).get_attr_from_name(attr)

@@ -958,10 +960,12 @@ class TF:
            ].data.tolist()
            if self.ex in outputs or self.ey in outputs:
                if np.all(
-                    self._transfer_function.transfer_function.
-
-
-
+                    self._transfer_function.transfer_function.loc[
+                        dict(
+                            input=self._ch_input_dict["impedance"],
+                            output=self._ch_output_dict["impedance"],
+                        )
+                    ].data
                    == 0
                ):
                    return False

@@ -977,10 +981,12 @@ class TF:

        """
        if self.has_impedance():
-            z = self.dataset.transfer_function.
-
-
-
+            z = self.dataset.transfer_function.loc[
+                dict(
+                    input=self._ch_input_dict["impedance"],
+                    output=self._ch_output_dict["impedance"],
+                )
+            ]
            z.name = "impedance"
            for key, mkey in self._dataset_attr_dict.items():
                obj, attr = mkey.split(".", 1)

@@ -1011,10 +1017,12 @@ class TF:

        """
        if self.has_impedance():
-            z_err = self.dataset.transfer_function_error.
-
-
-
+            z_err = self.dataset.transfer_function_error.loc[
+                dict(
+                    input=self._ch_input_dict["impedance"],
+                    output=self._ch_output_dict["impedance"],
+                )
+            ]
            z_err.name = "impedance_error"

            for key, mkey in self._dataset_attr_dict.items():

@@ -1046,10 +1054,12 @@ class TF:

        """
        if self.has_impedance():
-            z_err = self.dataset.transfer_function_model_error.
-
-
-
+            z_err = self.dataset.transfer_function_model_error.loc[
+                dict(
+                    input=self._ch_input_dict["impedance"],
+                    output=self._ch_output_dict["impedance"],
+                )
+            ]
            z_err.name = "impedance_model_error"

            for key, mkey in self._dataset_attr_dict.items():

@@ -1087,10 +1097,12 @@ class TF:
            if self.hz in outputs:
                if np.all(
                    np.nan_to_num(
-                        self._transfer_function.transfer_function.
-
-
-
+                        self._transfer_function.transfer_function.loc[
+                            dict(
+                                input=self._ch_input_dict["tipper"],
+                                output=self._ch_output_dict["tipper"],
+                            )
+                        ].data
                    )
                    == 0
                ):

@@ -1107,10 +1119,12 @@ class TF:

        """
        if self.has_tipper():
-            t = self.dataset.transfer_function.
-
-
-
+            t = self.dataset.transfer_function.loc[
+                dict(
+                    input=self._ch_input_dict["tipper"],
+                    output=self._ch_output_dict["tipper"],
+                )
+            ]
            t.name = "tipper"

            for key, mkey in self._dataset_attr_dict.items():

@@ -1141,10 +1155,12 @@ class TF:

        """
        if self.has_tipper():
-            t = self.dataset.transfer_function_error.
-
-
-
+            t = self.dataset.transfer_function_error.loc[
+                dict(
+                    input=self._ch_input_dict["tipper"],
+                    output=self._ch_output_dict["tipper"],
+                )
+            ]
            t.name = "tipper_error"
            for key, mkey in self._dataset_attr_dict.items():
                obj, attr = mkey.split(".", 1)

@@ -1174,10 +1190,12 @@ class TF:

        """
        if self.has_tipper():
-            t = self.dataset.transfer_function_model_error.
-
-
-
+            t = self.dataset.transfer_function_model_error.loc[
+                dict(
+                    input=self._ch_input_dict["tipper"],
+                    output=self._ch_output_dict["tipper"],
+                )
+            ]
            t.name = "tipper_model_error"
            for key, mkey in self._dataset_attr_dict.items():
                obj, attr = mkey.split(".", 1)

@@ -1209,10 +1227,12 @@ class TF:
        """

        if np.all(
-            self._transfer_function.inverse_signal_power.
-
-
-
+            self._transfer_function.inverse_signal_power.loc[
+                dict(
+                    input=self._ch_input_dict["isp"],
+                    output=self._ch_output_dict["isp"],
+                )
+            ].data
            == 0
        ):
            return False

@@ -1221,10 +1241,12 @@ class TF:
    @property
    def inverse_signal_power(self):
        if self.has_inverse_signal_power():
-            ds = self.dataset.inverse_signal_power.
-
-
-
+            ds = self.dataset.inverse_signal_power.loc[
+                dict(
+                    input=self._ch_input_dict["isp"],
+                    output=self._ch_output_dict["isp"],
+                )
+            ]
            for key, mkey in self._dataset_attr_dict.items():
                obj, attr = mkey.split(".", 1)
                value = getattr(self, obj).get_attr_from_name(attr)

@@ -1260,10 +1282,12 @@ class TF:
        """

        if np.all(
-            self._transfer_function.residual_covariance.
-
-
-
+            self._transfer_function.residual_covariance.loc[
+                dict(
+                    input=self._ch_input_dict["res"],
+                    output=self._ch_output_dict["res"],
+                )
+            ].data
            == 0
        ):
            return False

@@ -1272,10 +1296,12 @@ class TF:
    @property
    def residual_covariance(self):
        if self.has_residual_covariance():
-            ds = self.dataset.residual_covariance.
-
-
-
+            ds = self.dataset.residual_covariance.loc[
+                dict(
+                    input=self._ch_input_dict["res"],
+                    output=self._ch_output_dict["res"],
+                )
+            ]
            for key, mkey in self._dataset_attr_dict.items():
                obj, attr = mkey.split(".", 1)
                value = getattr(self, obj).get_attr_from_name(attr)
@@ -2222,19 +2248,19 @@ class TF:
            setattr(self, tf_key, getattr(zmm_obj, j_key))
        self._transfer_function["transfer_function"].loc[
            dict(input=zmm_obj.input_channels, output=zmm_obj.output_channels)
-        ] = zmm_obj.dataset.transfer_function.
-            input=zmm_obj.input_channels, output=zmm_obj.output_channels
-
+        ] = zmm_obj.dataset.transfer_function.loc[
+            dict(input=zmm_obj.input_channels, output=zmm_obj.output_channels)
+        ]
        self._transfer_function["inverse_signal_power"].loc[
            dict(input=zmm_obj.input_channels, output=zmm_obj.input_channels)
-        ] = zmm_obj.dataset.inverse_signal_power.
-            input=zmm_obj.input_channels, output=zmm_obj.input_channels
-
+        ] = zmm_obj.dataset.inverse_signal_power.loc[
+            dict(input=zmm_obj.input_channels, output=zmm_obj.input_channels)
+        ]
        self._transfer_function["residual_covariance"].loc[
            dict(input=zmm_obj.output_channels, output=zmm_obj.output_channels)
-        ] = zmm_obj.dataset.residual_covariance.
-            input=zmm_obj.output_channels, output=zmm_obj.output_channels
-
+        ] = zmm_obj.dataset.residual_covariance.loc[
+            dict(input=zmm_obj.output_channels, output=zmm_obj.output_channels)
+        ]

        self._compute_error_from_covariance()
        self._rotation_angle = -1 * zmm_obj.declination
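When copying blocks from the ZMM-derived dataset, the same label-based pattern is now used on both sides of the assignment, so only the matching input/output channels are written. A hedged sketch; the channel names and values are illustrative, not the TF internals.

    # Hedged sketch of assigning one labelled block into another with .loc on both sides.
    import numpy as np
    import xarray as xr

    full = xr.DataArray(
        np.zeros((3, 2)),
        dims=("output", "input"),
        coords={"output": ["ex", "ey", "hz"], "input": ["hx", "hy"]},
    )
    zmm_block = xr.DataArray(
        np.ones((2, 2)),
        dims=("output", "input"),
        coords={"output": ["ex", "ey"], "input": ["hx", "hy"]},
    )

    full.loc[dict(output=["ex", "ey"], input=["hx", "hy"])] = zmm_block.loc[
        dict(output=["ex", "ey"], input=["hx", "hy"])
    ]
    print(full.values)  # ones in the ex/ey rows, zeros left in hz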
@@ -2362,4 +2388,3 @@ class TF:


class TFError(Exception):
    pass
-    pass
mt_metadata/transfer_functions/io/edi/edi.py
CHANGED

@@ -300,20 +300,20 @@ class EDI(object):

        self._read_data()

-        if self.Header.lat
+        if self.Header.lat in [None, 0.0]:
            self.Header.lat = self.Measurement.reflat
            self.logger.debug(
-                "Got latitude from reflat for {
+                f"Got latitude from reflat for {self.Header.dataid}"
            )
-        if self.Header.lon
+        if self.Header.lon in [None, 0.0]:
            self.Header.lon = self.Measurement.reflon
            self.logger.debug(
-                "Got longitude from reflon for {
+                f"Got longitude from reflon for {self.Header.dataid}"
            )
-        if self.Header.elev
+        if self.Header.elev in [None, 0.0]:
            self.Header.elev = self.Measurement.refelev
            self.logger.debug(
-                "Got elevation from refelev for {
+                f"Got elevation from refelev for {self.Header.dataid}"
            )

        if self.elev in [0, None] and get_elevation:

@@ -1203,7 +1203,7 @@ class EDI(object):
            if k in ["processing_parameters"]:
                for item in v:
                    self.Info.info_list.append(
-                        f"transfer_function.{item.replace('=', ' = ')}"
+                        f"transfer_function.processing_parameters.{item.replace('=', ' = ')}"
                    )
            else:
                self.Info.info_list.append(f"transfer_function.{k} = {v}")
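When the EDI header carries a missing or zero latitude, longitude, or elevation, the values now fall back to the measurement section's reference location. A hedged sketch of that fallback using simple stand-in objects rather than the EDI classes; the coordinates are made up.

    # Hedged sketch of the header fallback; Stub objects replace Header/Measurement.
    class Stub:
        pass

    header, measurement = Stub(), Stub()
    header.lat, header.lon, header.elev, header.dataid = 0.0, 0.0, None, "mt001"
    measurement.reflat, measurement.reflon, measurement.refelev = 40.5, -112.2, 1325.0

    if header.lat in [None, 0.0]:
        header.lat = measurement.reflat
        print(f"Got latitude from reflat for {header.dataid}")
    if header.lon in [None, 0.0]:
        header.lon = measurement.reflon
        print(f"Got longitude from reflon for {header.dataid}")
    if header.elev in [None, 0.0]:
        header.elev = measurement.refelev
        print(f"Got elevation from refelev for {header.dataid}")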
mt_metadata/transfer_functions/io/emtfxml/emtfxml.py
CHANGED

@@ -1211,7 +1211,7 @@ class EMTFXML(emtf_xml.EMTF):
                self.logger.warning(
                    f"Cannot set processing info attribute {param}"
                )
-                self.logger.exception(error)
+                # self.logger.exception(error)
        elif "magnetometer" in key:
            index = int(key.split("_")[1].split(".")[0])
            key = key.split(".", 1)[1:]

@@ -1227,7 +1227,7 @@ class EMTFXML(emtf_xml.EMTF):
                self.logger.warning(
                    f"Cannot set processing info attribute {param}"
                )
-                self.logger.exception(error)
+                # self.logger.exception(error)
        else:
            try:
                run.set_attr_from_name(key, value)

@@ -1235,7 +1235,7 @@ class EMTFXML(emtf_xml.EMTF):
                self.logger.warning(
                    f"Cannot set processing info attribute {param}"
                )
-                self.logger.exception(error)
+                # self.logger.exception(error)
        else:
            try:
                self.processing_info.set_attr_from_name(key, value)

@@ -1243,7 +1243,7 @@ class EMTFXML(emtf_xml.EMTF):
                self.logger.warning(
                    f"Cannot set processing info attribute {param}"
                )
-                self.logger.exception(error)
+                # self.logger.exception(error)

        self.site.run_list = sm.transfer_function.runs_processed