mt-metadata 0.3.9__py2.py3-none-any.whl → 0.4.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of mt-metadata has been flagged as a potentially problematic release.

Files changed (95)
  1. mt_metadata/__init__.py +1 -1
  2. mt_metadata/base/helpers.py +84 -9
  3. mt_metadata/base/metadata.py +137 -65
  4. mt_metadata/features/__init__.py +14 -0
  5. mt_metadata/features/coherence.py +303 -0
  6. mt_metadata/features/cross_powers.py +29 -0
  7. mt_metadata/features/fc_coherence.py +81 -0
  8. mt_metadata/features/feature.py +72 -0
  9. mt_metadata/features/feature_decimation_channel.py +26 -0
  10. mt_metadata/features/feature_fc.py +24 -0
  11. mt_metadata/{transfer_functions/processing/aurora/decimation.py → features/feature_fc_run.py} +9 -4
  12. mt_metadata/features/feature_ts.py +24 -0
  13. mt_metadata/{transfer_functions/processing/aurora/window.py → features/feature_ts_run.py} +11 -18
  14. mt_metadata/features/standards/__init__.py +6 -0
  15. mt_metadata/features/standards/base_feature.json +46 -0
  16. mt_metadata/features/standards/coherence.json +57 -0
  17. mt_metadata/features/standards/fc_coherence.json +57 -0
  18. mt_metadata/features/standards/feature_decimation_channel.json +68 -0
  19. mt_metadata/features/standards/feature_fc_run.json +35 -0
  20. mt_metadata/features/standards/feature_ts_run.json +35 -0
  21. mt_metadata/features/standards/feature_weighting_window.json +46 -0
  22. mt_metadata/features/standards/weight_kernel.json +46 -0
  23. mt_metadata/features/standards/weights.json +101 -0
  24. mt_metadata/features/test_helpers/channel_weight_specs_example.json +156 -0
  25. mt_metadata/features/weights/__init__.py +0 -0
  26. mt_metadata/features/weights/base.py +44 -0
  27. mt_metadata/features/weights/channel_weight_spec.py +209 -0
  28. mt_metadata/features/weights/feature_weight_spec.py +194 -0
  29. mt_metadata/features/weights/monotonic_weight_kernel.py +275 -0
  30. mt_metadata/features/weights/standards/__init__.py +6 -0
  31. mt_metadata/features/weights/standards/activation_monotonic_weight_kernel.json +38 -0
  32. mt_metadata/features/weights/standards/base.json +36 -0
  33. mt_metadata/features/weights/standards/channel_weight_spec.json +35 -0
  34. mt_metadata/features/weights/standards/composite.json +36 -0
  35. mt_metadata/features/weights/standards/feature_weight_spec.json +13 -0
  36. mt_metadata/features/weights/standards/monotonic_weight_kernel.json +49 -0
  37. mt_metadata/features/weights/standards/taper_monotonic_weight_kernel.json +16 -0
  38. mt_metadata/features/weights/taper_weight_kernel.py +60 -0
  39. mt_metadata/helper_functions.py +69 -0
  40. mt_metadata/timeseries/filters/channel_response.py +77 -37
  41. mt_metadata/timeseries/filters/coefficient_filter.py +6 -5
  42. mt_metadata/timeseries/filters/filter_base.py +11 -15
  43. mt_metadata/timeseries/filters/fir_filter.py +8 -1
  44. mt_metadata/timeseries/filters/frequency_response_table_filter.py +26 -11
  45. mt_metadata/timeseries/filters/helper_functions.py +0 -2
  46. mt_metadata/timeseries/filters/obspy_stages.py +4 -1
  47. mt_metadata/timeseries/filters/pole_zero_filter.py +9 -5
  48. mt_metadata/timeseries/filters/time_delay_filter.py +8 -1
  49. mt_metadata/timeseries/location.py +20 -5
  50. mt_metadata/timeseries/person.py +14 -7
  51. mt_metadata/timeseries/standards/person.json +1 -1
  52. mt_metadata/timeseries/standards/run.json +2 -2
  53. mt_metadata/timeseries/station.py +4 -2
  54. mt_metadata/timeseries/stationxml/__init__.py +5 -0
  55. mt_metadata/timeseries/stationxml/xml_channel_mt_channel.py +25 -27
  56. mt_metadata/timeseries/stationxml/xml_inventory_mt_experiment.py +16 -47
  57. mt_metadata/timeseries/stationxml/xml_station_mt_station.py +25 -24
  58. mt_metadata/transfer_functions/__init__.py +3 -0
  59. mt_metadata/transfer_functions/core.py +8 -11
  60. mt_metadata/transfer_functions/io/emtfxml/metadata/location.py +5 -0
  61. mt_metadata/transfer_functions/io/emtfxml/metadata/provenance.py +14 -3
  62. mt_metadata/transfer_functions/io/tools.py +2 -0
  63. mt_metadata/transfer_functions/io/zonge/metadata/header.py +1 -1
  64. mt_metadata/transfer_functions/io/zonge/metadata/standards/header.json +1 -1
  65. mt_metadata/transfer_functions/io/zonge/metadata/standards/job.json +2 -2
  66. mt_metadata/transfer_functions/io/zonge/zonge.py +19 -23
  67. mt_metadata/transfer_functions/processing/__init__.py +2 -1
  68. mt_metadata/transfer_functions/processing/aurora/__init__.py +2 -4
  69. mt_metadata/transfer_functions/processing/aurora/band.py +46 -125
  70. mt_metadata/transfer_functions/processing/aurora/channel_nomenclature.py +27 -20
  71. mt_metadata/transfer_functions/processing/aurora/decimation_level.py +324 -152
  72. mt_metadata/transfer_functions/processing/aurora/frequency_bands.py +230 -0
  73. mt_metadata/transfer_functions/processing/aurora/processing.py +3 -3
  74. mt_metadata/transfer_functions/processing/aurora/run.py +32 -7
  75. mt_metadata/transfer_functions/processing/aurora/standards/decimation_level.json +7 -73
  76. mt_metadata/transfer_functions/processing/aurora/stations.py +33 -4
  77. mt_metadata/transfer_functions/processing/fourier_coefficients/decimation.py +176 -178
  78. mt_metadata/transfer_functions/processing/fourier_coefficients/fc.py +11 -9
  79. mt_metadata/transfer_functions/processing/fourier_coefficients/standards/decimation.json +1 -111
  80. mt_metadata/transfer_functions/processing/short_time_fourier_transform.py +64 -0
  81. mt_metadata/transfer_functions/processing/standards/__init__.py +6 -0
  82. mt_metadata/transfer_functions/processing/standards/short_time_fourier_transform.json +94 -0
  83. mt_metadata/transfer_functions/processing/{aurora/standards/decimation.json → standards/time_series_decimation.json} +17 -6
  84. mt_metadata/transfer_functions/processing/{aurora/standards → standards}/window.json +13 -2
  85. mt_metadata/transfer_functions/processing/time_series_decimation.py +50 -0
  86. mt_metadata/transfer_functions/processing/window.py +118 -0
  87. mt_metadata/transfer_functions/tf/station.py +17 -1
  88. mt_metadata/utils/mttime.py +22 -3
  89. mt_metadata/utils/validators.py +4 -2
  90. {mt_metadata-0.3.9.dist-info → mt_metadata-0.4.0.dist-info}/METADATA +39 -15
  91. {mt_metadata-0.3.9.dist-info → mt_metadata-0.4.0.dist-info}/RECORD +95 -55
  92. {mt_metadata-0.3.9.dist-info → mt_metadata-0.4.0.dist-info}/WHEEL +1 -1
  93. {mt_metadata-0.3.9.dist-info → mt_metadata-0.4.0.dist-info}/AUTHORS.rst +0 -0
  94. {mt_metadata-0.3.9.dist-info → mt_metadata-0.4.0.dist-info}/LICENSE +0 -0
  95. {mt_metadata-0.3.9.dist-info → mt_metadata-0.4.0.dist-info}/top_level.txt +0 -0
mt_metadata/timeseries/filters/channel_response.py

@@ -9,6 +9,7 @@ the frequency domain.
 .. note:: Time Delay filters should be applied in the time domain
 otherwise bad things can happen.
 """
+
 # =============================================================================
 # Imports
 # =============================================================================
@@ -16,6 +17,7 @@ from copy import deepcopy
 import numpy as np
 
 from mt_metadata.base import Base, get_schema
+from mt_metadata.base.helpers import requires
 from mt_metadata.timeseries.filters.standards import SCHEMA_FN_PATHS
 from mt_metadata.timeseries.filters import (
     PoleZeroFilter,
@@ -24,10 +26,14 @@ from mt_metadata.timeseries.filters import (
     FrequencyResponseTableFilter,
     FIRFilter,
 )
-from mt_metadata.timeseries.filters.filter_base import FilterBase
+
 from mt_metadata.utils.units import get_unit_object
 from mt_metadata.timeseries.filters.plotting_helpers import plot_response
-from obspy.core import inventory
+
+try:
+    from obspy.core import inventory
+except ImportError:
+    inventory = None
 
 # =============================================================================
 attr_dict = get_schema("channel_response", SCHEMA_FN_PATHS)
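
A pattern that repeats throughout this release: direct obspy imports are wrapped in try/except so modules still import when obspy is absent, and methods that genuinely need obspy are gated with the new requires decorator from mt_metadata.base.helpers (that file changes +84 -9 above but is not shown in this excerpt). The sketch below is only a plausible minimal shape for such a guard, not the package's actual implementation.

    # Hypothetical sketch of a guard decorator like ``requires``; the real
    # implementation in mt_metadata.base.helpers may differ.
    import functools

    def requires(**modules):
        """Raise ImportError when a decorated method needs a missing optional package."""

        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                # any keyword whose value is None marks a dependency that failed to import
                missing = [name for name, module in modules.items() if module is None]
                if missing:
                    raise ImportError(
                        f"{func.__name__} requires {', '.join(missing)} to be installed."
                    )
                return func(*args, **kwargs)

            return wrapper

        return decorator

With the try/except import above, inventory is None when obspy is not installed, so a decoration like @requires(obspy=inventory) turns a missing dependency into a clear ImportError at call time instead of an import-time crash.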
@@ -63,7 +69,6 @@ class ChannelResponse(Base):
     def __repr__(self):
         return self.__str__()
 
-
     @property
     def filters_list(self):
         """filters list"""
@@ -96,7 +101,9 @@ class ChannelResponse(Base):
         elif isinstance(value, (list, tuple, np.ndarray)):
            self._frequencies = np.array(value, dtype=float)
         else:
-            msg = f"input values must be an list, tuple, or np.ndarray, not {type(value)}"
+            msg = (
+                f"input values must be an list, tuple, or np.ndarray, not {type(value)}"
+            )
             self.logger.error(msg)
             raise TypeError(msg)
 
@@ -146,9 +153,7 @@ class ChannelResponse(Base):
             if is_supported_filter(item):
                 return_list.append(item)
             else:
-                fails.append(
-                    f"Item is not a supported filter type, {type(item)}"
-                )
+                fails.append(f"Item is not a supported filter type, {type(item)}")
 
         if fails:
             raise TypeError(", ".join(fails))
@@ -179,7 +184,7 @@ class ChannelResponse(Base):
     def normalization_frequency(self):
         """get normalization frequency from ZPK or FAP filter"""
 
-        if self._normalization_frequency == 0.0:
+        if self._normalization_frequency in [0.0, None]:
             if self.pass_band is not None:
                 return np.round(10 ** np.mean(np.log10(self.pass_band)), 3)
 
@@ -198,9 +203,7 @@ class ChannelResponse(Base):
         :return: all the non-time_delay filters as a list
 
         """
-        non_delay_filters = [
-            x for x in self.filters_list if x.type != "time delay"
-        ]
+        non_delay_filters = [x for x in self.filters_list if x.type != "time delay"]
         return non_delay_filters
 
     @property
@@ -226,18 +229,26 @@ class ChannelResponse(Base):
             total_delay += delay_filter.delay
         return total_delay
 
-    def get_indices_of_filters_to_remove(self, include_decimation=False, include_delay=False):
+    def get_indices_of_filters_to_remove(
+        self, include_decimation=False, include_delay=False
+    ):
         indices = list(np.arange(len(self.filters_list)))
 
         if not include_delay:
-            indices = [i for i in indices if self.filters_list[i].type != "time delay"]
+            indices = [
+                i for i in indices if self.filters_list[i].type != "time delay"
+            ]
 
         if not include_decimation:
-            indices = [i for i in indices if not self.filters_list[i].decimation_active]
+            indices = [
+                i for i in indices if not self.filters_list[i].decimation_active
+            ]
 
         return indices
 
-    def get_list_of_filters_to_remove(self, include_decimation=False, include_delay=False):
+    def get_list_of_filters_to_remove(
+        self, include_decimation=False, include_delay=False
+    ):
         """
 
         :param include_decimation: bool
@@ -250,8 +261,9 @@ class ChannelResponse(Base):
         # inverse_filters = [x.inverse() for x in self.filters_list]
         # self.filters_list = inverse_filters
         """
-        indices = self.get_indices_of_filters_to_remove(include_decimation=include_decimation,
-                                                        include_delay=include_delay)
+        indices = self.get_indices_of_filters_to_remove(
+            include_decimation=include_decimation, include_delay=include_delay
+        )
         return [self.filters_list[i] for i in indices]
 
     def complex_response(
@@ -287,13 +299,17 @@ class ChannelResponse(Base):
 
         # make filters list if not supplied
         if filters_list is None:
-            self.logger.warning("Filters list not provided, building list assuming all are applied")
+            self.logger.warning(
+                "Filters list not provided, building list assuming all are applied"
+            )
             filters_list = self.get_list_of_filters_to_remove(
-                include_decimation=include_decimation,
-                include_delay=include_delay)
+                include_decimation=include_decimation, include_delay=include_delay
+            )
 
         if len(filters_list) == 0:
-            self.logger.warning(f"No filters associated with {self.__class__}, returning 1")
+            self.logger.warning(
+                f"No filters associated with {self.__class__}, returning 1"
+            )
             return np.ones(len(self.frequencies), dtype=complex)
 
         # define the product of all filters as the total response function
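
Both the selection helpers and complex_response above use the same two flags: time-delay stages are dropped unless include_delay=True, and decimation stages are dropped unless include_decimation=True. A stand-alone illustration of that selection logic, with plain dicts standing in for filter objects (not library code):

    filters = [
        {"type": "zpk", "decimation_active": False},
        {"type": "time delay", "decimation_active": False},
        {"type": "coefficient", "decimation_active": True},
    ]
    include_delay = False
    include_decimation = False

    indices = list(range(len(filters)))
    if not include_delay:
        indices = [i for i in indices if filters[i]["type"] != "time delay"]
    if not include_decimation:
        indices = [i for i in indices if not filters[i]["decimation_active"]]

    print(indices)  # [0] -> only the pole-zero stage survives the defaults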
@@ -305,10 +321,7 @@ class ChannelResponse(Base):
             result /= np.max(np.abs(result))
         return result
 
-
-    def compute_instrument_sensitivity(
-        self, normalization_frequency=None, sig_figs=6
-    ):
+    def compute_instrument_sensitivity(self, normalization_frequency=None, sig_figs=16):
         """
         Compute the StationXML instrument sensitivity for the given normalization frequency
 
@@ -322,18 +335,41 @@ class ChannelResponse(Base):
         self.normalization_frequency = normalization_frequency
         sensitivity = 1.0
         for mt_filter in self.filters_list:
-            complex_response = mt_filter.complex_response(
-                self.normalization_frequency
-            )
+            complex_response = mt_filter.complex_response(self.normalization_frequency)
             sensitivity *= complex_response.astype(complex)
         try:
             sensitivity = np.abs(sensitivity[0])
         except (IndexError, TypeError):
             sensitivity = np.abs(sensitivity)
 
-        return round(
-            sensitivity, sig_figs - int(np.floor(np.log10(abs(sensitivity))))
-        )
+        return round(sensitivity, sig_figs - int(np.floor(np.log10(abs(sensitivity)))))
+
+    def compute_total_gain(self, sig_figs=16):
+        """
+        Computing the total sensitivity seems to be different than just adding all the gains together.
+        Overall the total sensitivity is useless for MT cause they don't have the ability to use the units.
+        So if a person downloads data from the DMC, they will simply use the filters provided.
+
+        Parameters
+        ----------
+        sig_figs : int, optional
+            _description_, by default 6
+
+        Returns
+        -------
+        _type_
+            _description_
+
+        Raises
+        ------
+        ValueError
+            _description_
+        """
+        total_gain = 1
+        for mt_filter in self.filters_list:
+            total_gain *= mt_filter.gain
+
+        return round(total_gain, sig_figs - int(np.floor(np.log10(abs(total_gain)))))
 
     @property
     def units_in(self):
@@ -355,7 +391,6 @@ class ChannelResponse(Base):
         else:
             return self.filters_list[-1].units_out
 
-
     def _check_consistency_of_units(self):
         """
         confirms that the input and output units of each filter state are consistent
@@ -375,6 +410,7 @@ class ChannelResponse(Base):
 
         return True
 
+    @requires(obspy=inventory)
     def to_obspy(self, sample_rate=1):
         """
         Output :class:`obspy.core.inventory.InstrumentSensitivity` object that
@@ -387,6 +423,13 @@ class ChannelResponse(Base):
 
         """
         total_sensitivity = self.compute_instrument_sensitivity()
+        total_gain = self.compute_total_gain()
+
+        if total_sensitivity != total_gain:
+            self.logger.info(
+                f"total sensitivity {total_sensitivity} != total gain {total_gain}. Using total_gain."
+            )
+            total_sensitivity = total_gain
 
         units_in_obj = get_unit_object(self.units_in)
         units_out_obj = get_unit_object(self.units_out)
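
The new compute_total_gain simply multiplies the gains of all stages, and to_obspy now prefers that product over the frequency-dependent sensitivity when the two disagree. A small worked example of the product and the significant-figure rounding used above, with hypothetical stage gains:

    import numpy as np

    stage_gains = [100.0, 2.5, 1.0]    # hypothetical per-stage gains
    total_gain = 1
    for gain in stage_gains:
        total_gain *= gain             # 100 * 2.5 * 1 = 250.0

    sig_figs = 16
    rounded = round(total_gain, sig_figs - int(np.floor(np.log10(abs(total_gain)))))
    print(rounded)                     # 250.0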
@@ -404,9 +447,7 @@ class ChannelResponse(Base):
         for ii, f in enumerate(self.filters_list, 1):
             if f.type in ["coefficient"]:
                 if f.units_out not in ["count"]:
-                    self.logger.debug(
-                        f"converting CoefficientFilter {f.name} to PZ"
-                    )
+                    self.logger.debug(f"converting CoefficientFilter {f.name} to PZ")
                     pz = PoleZeroFilter()
                     pz.gain = f.gain
                     pz.units_in = f.units_in
@@ -484,8 +525,7 @@ class ChannelResponse(Base):
 
         # get response of individual filters
         cr_list = [
-            f.complex_response(self.frequencies, **cr_kwargs)
-            for f in filters_list
+            f.complex_response(self.frequencies, **cr_kwargs) for f in filters_list
         ]
 
         # compute total response
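
The total channel response is the element-wise product of the individual stage responses over the frequency array. A stand-alone illustration with two invented stages (not library code):

    import numpy as np

    frequencies = np.logspace(-2, 2, 5)
    flat_gain = np.full(frequencies.shape, 2.0, dtype=complex)   # flat gain stage
    low_pass = 1.0 / (1.0 + 1j * frequencies / 10.0)             # simple roll-off shape
    total_response = flat_gain * low_pass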
mt_metadata/timeseries/filters/coefficient_filter.py

@@ -1,13 +1,13 @@
-import copy
 import numpy as np
-from obspy.core import inventory
+try:
+    from obspy.core import inventory
+except ImportError:
+    inventory = None
 
 from mt_metadata.base import get_schema
 from mt_metadata.timeseries.filters.filter_base import FilterBase
-from mt_metadata.timeseries.filters.filter_base import get_base_obspy_mapping
 from mt_metadata.timeseries.filters.standards import SCHEMA_FN_PATHS
-from mt_metadata.base.helpers import write_lines
-
+from mt_metadata.base.helpers import write_lines, requires
 
 # =============================================================================
 attr_dict = get_schema("filter_base", SCHEMA_FN_PATHS)
@@ -27,6 +27,7 @@ class CoefficientFilter(FilterBase):
         if self.gain == 0.0:
             self.gain = 1.0
 
+    @requires(obspy=inventory)
     def to_obspy(
         self,
         stage_number=1,

mt_metadata/timeseries/filters/filter_base.py

@@ -41,8 +41,6 @@ of the filter in frequency domain. It is very similar to an "obspy filter stage"
 # =============================================================================
 # Imports
 # =============================================================================
-import copy
-import obspy
 import numpy as np
 
 from mt_metadata.base.helpers import write_lines
@@ -58,6 +56,7 @@ from mt_metadata.utils.mttime import MTime
 attr_dict = get_schema("filter_base", SCHEMA_FN_PATHS)
 # =============================================================================
 
+
 def get_base_obspy_mapping():
     """
     Different filters have different mappings, but the attributes mapped here are common to all of them.
@@ -87,6 +86,7 @@ class FilterBase(Base):
     it may find more application in future.
 
     """
+
     __doc__ = write_lines(attr_dict)
 
     def __init__(self, **kwargs):
@@ -98,6 +98,7 @@ class FilterBase(Base):
         self.comments = None
         self._obspy_mapping = None
         self.gain = 1.0
+        self._name = None
 
         super().__init__(attr_dict=attr_dict, **kwargs)
 
@@ -157,7 +158,6 @@ class FilterBase(Base):
         else:
             self._name = None
 
-
     @property
     def calibration_date(self):
         """
@@ -261,20 +261,17 @@ class FilterBase(Base):
 
         """
 
-        if not isinstance(stage, obspy.core.inventory.response.ResponseStage):
-            msg = f"Expected a Stage and got a {type(stage)}"
-            cls().logger.error(msg)
-            raise TypeError(msg)
-
         if mapping is None:
             mapping = cls().make_obspy_mapping()
         kwargs = {}
-        for obspy_label, mth5_label in mapping.items():
-            try:
-                kwargs[mth5_label] = stage.__dict__[obspy_label]
-            except KeyError:
-                print(f"Key {obspy_label} not found in stage object")
-                raise Exception
+
+        try:
+            for obspy_label, mth5_label in mapping.items():
+                kwargs[mth5_label] = getattr(stage, obspy_label)
+        except AttributeError:
+            msg = f"Expected a Stage and got a {type(stage)}"
+            cls().logger.error(msg)
+            raise TypeError(msg)
         return cls(**kwargs)
 
     def complex_response(self, frqs):
@@ -403,4 +400,3 @@ class FilterBase(Base):
         if self.decimation_factor != 1.0:
             return True
         return False
-
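
from_obspy_stage no longer isinstance-checks against obspy's ResponseStage (which would need obspy importable just to produce a nice error); it pulls the mapped attributes with getattr and converts an AttributeError into the same TypeError. A stand-alone sketch of that duck-typed mapping follows; the mapping keys here are invented for illustration, not the actual contents of get_base_obspy_mapping:

    class FakeStage:
        stage_gain = 10.0
        name = "example stage"

    mapping = {"stage_gain": "gain", "name": "name"}   # obspy label -> mt_metadata label
    stage = FakeStage()

    kwargs = {}
    try:
        for obspy_label, mt_label in mapping.items():
            kwargs[mt_label] = getattr(stage, obspy_label)
    except AttributeError:
        raise TypeError(f"Expected a Stage and got a {type(stage)}")

    print(kwargs)   # {'gain': 10.0, 'name': 'example stage'}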
mt_metadata/timeseries/filters/fir_filter.py

@@ -1,6 +1,12 @@
 import matplotlib.pyplot as plt
 import numpy as np
-from obspy.core.inventory.response import FIRResponseStage
+
+from mt_metadata.base.helpers import requires
+
+try:
+    from obspy.core.inventory.response import FIRResponseStage
+except ImportError:
+    FIRResponseStage = None
 import scipy.signal as signal
 
 from mt_metadata.base import get_schema
@@ -124,6 +130,7 @@ class FIRFilter(FilterBase):
         plt.axis("tight")
         plt.show()
 
+    @requires(obspy=FIRResponseStage)
     def to_obspy(
         self, stage_number=1, normalization_frequency=1, sample_rate=1,
     ):

mt_metadata/timeseries/filters/frequency_response_table_filter.py

@@ -10,10 +10,15 @@
 import numpy as np
 from scipy.interpolate import interp1d
 
-from obspy.core.inventory.response import (
-    ResponseListResponseStage,
-    ResponseListElement,
-)
+from mt_metadata.base.helpers import requires
+
+try:
+    from obspy.core.inventory.response import (
+        ResponseListResponseStage,
+        ResponseListElement,
+    )
+except ImportError:
+    ResponseListResponseStage = ResponseListElement = None
 
 from mt_metadata.base import get_schema
 from mt_metadata.timeseries.filters.filter_base import FilterBase
@@ -23,9 +28,7 @@ from mt_metadata.timeseries.filters.standards import SCHEMA_FN_PATHS
 
 # =============================================================================
 attr_dict = get_schema("filter_base", SCHEMA_FN_PATHS)
-attr_dict.add_dict(
-    get_schema("frequency_response_table_filter", SCHEMA_FN_PATHS)
-)
+attr_dict.add_dict(get_schema("frequency_response_table_filter", SCHEMA_FN_PATHS))
 
 # =============================================================================
 
@@ -72,7 +75,9 @@ class FrequencyResponseTableFilter(FilterBase):
         if isinstance(value, (list, tuple, np.ndarray)):
             self._empirical_frequencies = np.array(value, dtype=float)
         else:
-            msg = f"input values must be an list, tuple, or np.ndarray, not {type(value)}"
+            msg = (
+                f"input values must be an list, tuple, or np.ndarray, not {type(value)}"
+            )
             self.logger.error(msg)
             raise TypeError(msg)
 
@@ -98,7 +103,9 @@ class FrequencyResponseTableFilter(FilterBase):
             self._empirical_amplitudes = np.array(value, dtype=float)
 
         else:
-            msg = f"input values must be an list, tuple, or np.ndarray, not {type(value)}"
+            msg = (
+                f"input values must be an list, tuple, or np.ndarray, not {type(value)}"
+            )
             self.logger.error(msg)
             raise TypeError(msg)
 
@@ -142,7 +149,9 @@ class FrequencyResponseTableFilter(FilterBase):
             self._empirical_phases = np.deg2rad(self._empirical_phases)
 
         else:
-            msg = f"input values must be an list, tuple, or np.ndarray, not {type(value)}"
+            msg = (
+                f"input values must be an list, tuple, or np.ndarray, not {type(value)}"
+            )
             self.logger.error(msg)
             raise TypeError(msg)
 
@@ -166,6 +175,7 @@ class FrequencyResponseTableFilter(FilterBase):
         """
         return self._empirical_frequencies.max()
 
+    @requires(obspy=(ResponseListResponseStage and ResponseListElement))
     def to_obspy(
         self,
         stage_number=1,
@@ -186,7 +196,12 @@ class FrequencyResponseTableFilter(FilterBase):
 
         """
         response_elements = []
-        for f, a, p in zip(self.frequencies, self.amplitudes, self.phases):
+        # phase needs to be in degrees.
+        if np.abs(self.phases).max() < 2 * np.pi:
+            phases = np.rad2deg(self.phases)
+        else:
+            phases = self.phases
+        for f, a, p in zip(self.frequencies, self.amplitudes, phases):
             element = ResponseListElement(f, a, p)
             response_elements.append(element)
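
obspy's ResponseListElement expects phase in degrees, while the FAP filter stores phases in radians internally (incoming degree values are converted with np.deg2rad in the setter above), so to_obspy now converts back when the stored values look like radians, i.e. when every magnitude is below 2*pi. A small illustration of that heuristic:

    import numpy as np

    phases = np.array([0.0, 0.5, 1.0])        # hypothetical phases; max < 2*pi
    if np.abs(phases).max() < 2 * np.pi:
        phases_deg = np.rad2deg(phases)       # approx [0.0, 28.65, 57.30] degrees
    else:
        phases_deg = phases                   # already degrees, leave untouched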
 
mt_metadata/timeseries/filters/helper_functions.py

@@ -1,5 +1,3 @@
-import obspy
-
 from loguru import logger
 from mt_metadata.timeseries.filters.coefficient_filter import CoefficientFilter
 from mt_metadata.timeseries.filters.frequency_response_table_filter import (

mt_metadata/timeseries/filters/obspy_stages.py

@@ -6,7 +6,10 @@ Idea here is to add logic to interrogate stage filters received from StationXML
 # Imports
 # =============================================================================
 import numpy as np
-import obspy
+try:
+    import obspy
+except ImportError:
+    raise ImportError("obspy_stages requires obspy to be installed.")
 from loguru import logger
 
 from mt_metadata.timeseries.filters import (

mt_metadata/timeseries/filters/pole_zero_filter.py

@@ -7,12 +7,15 @@
 
 """
 import numpy as np
-import obspy
+try:
+    import obspy
+except ImportError:
+    obspy = None
 import scipy.signal as signal
 
 from mt_metadata.base import get_schema
-from mt_metadata.timeseries.filters.filter_base import FilterBase
-from mt_metadata.timeseries.filters.filter_base import get_base_obspy_mapping
+from mt_metadata.base.helpers import requires
+from mt_metadata.timeseries.filters.filter_base import FilterBase, get_base_obspy_mapping
 from mt_metadata.timeseries.filters.standards import SCHEMA_FN_PATHS
 
 # =============================================================================
@@ -21,16 +24,16 @@ attr_dict.add_dict(get_schema("pole_zero_filter", SCHEMA_FN_PATHS))
 # =============================================================================
 
 
-
 class PoleZeroFilter(FilterBase):
     def __init__(self, **kwargs):
 
+        self._poles = np.empty(0, dtype=complex)
+        self._zeros = np.empty(0, dtype=complex)
         super().__init__()
 
         super(FilterBase, self).__init__(attr_dict=attr_dict, **kwargs)
         self.type = "zpk"
 
-
     def make_obspy_mapping(self):
         mapping = get_base_obspy_mapping()
         mapping["_zeros"] = "zeros"
@@ -134,6 +137,7 @@ class PoleZeroFilter(FilterBase):
         """
         return self.gain * self.normalization_factor
 
+    @requires(obspy=obspy)
     def to_obspy(
         self,
         stage_number=1,

mt_metadata/timeseries/filters/time_delay_filter.py

@@ -9,7 +9,13 @@
 """
 
 import numpy as np
-from obspy.core import inventory
+
+from mt_metadata.base.helpers import requires
+
+try:
+    from obspy.core import inventory
+except ImportError:
+    inventory = None
 
 from mt_metadata.base import get_schema
 from mt_metadata.timeseries.filters.filter_base import FilterBase
@@ -35,6 +41,7 @@ class TimeDelayFilter(FilterBase):
         mapping["decimation_delay"] = "delay"
         return mapping
 
+    @requires(obspy=inventory)
     def to_obspy(self, stage_number=1, sample_rate=1, normalization_frequency=0):
         """
         Convert to an obspy stage
mt_metadata/timeseries/location.py

@@ -21,6 +21,8 @@ from . import Declination
 # =============================================================================
 attr_dict = get_schema("location", SCHEMA_FN_PATHS)
 attr_dict.add_dict(get_schema("declination", SCHEMA_FN_PATHS), "declination")
+
+
 # =============================================================================
 class Location(Base):
     __doc__ = write_lines(attr_dict)
@@ -28,6 +30,9 @@ class Location(Base):
     def __init__(self, **kwargs):
 
         self.declination = Declination()
+        self._latitude = 0.0
+        self._longitude = 0.0
+        self._elevation = 0.0
 
         super().__init__(attr_dict=attr_dict, **kwargs)
 
@@ -70,7 +75,9 @@ class Location(Base):
             lat_value = float(latitude)
 
         except TypeError:
-            self.logger.debug("Could not convert {0} setting to 0".format(latitude))
+            self.logger.debug(
+                "Could not convert {0} setting to 0".format(latitude)
+            )
             return 0.0
 
         except ValueError:
@@ -102,7 +109,9 @@ class Location(Base):
             lon_value = float(longitude)
 
         except TypeError:
-            self.logger.debug("Could not convert {0} setting to 0".format(longitude))
+            self.logger.debug(
+                "Could not convert {0} setting to 0".format(longitude)
+            )
             return 0.0
 
         except ValueError:
@@ -130,7 +139,9 @@ class Location(Base):
         try:
             elev_value = float(elevation)
         except (ValueError, TypeError):
-            msg = "Could not convert {0} to a number setting to 0".format(elevation)
+            msg = "Could not convert {0} to a number setting to 0".format(
+                elevation
+            )
             self.logger.debug(msg)
             elev_value = 0.0
 
@@ -188,7 +199,9 @@ class Location(Base):
 
         p_list = position_str.split(":")
         if len(p_list) != 3:
-            msg = "{0} not correct format, should be DD:MM:SS".format(position_str)
+            msg = "{0} not correct format, should be DD:MM:SS".format(
+                position_str
+            )
            self.logger.error(msg)
            raise ValueError(msg)
 
@@ -204,7 +217,9 @@ class Location(Base):
 
         position_value = sign * (abs(deg) + minutes / 60.0 + sec / 3600.0)
 
-        self.logger.debug("Converted {0} to {1}".format(position_str, position_value))
+        self.logger.debug(
+            "Converted {0} to {1}".format(position_str, position_value)
+        )
 
         return position_value
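
The DD:MM:SS handling above rejects any string that does not split into exactly three fields, then converts degrees, minutes and seconds to a signed decimal degree. A worked example of that conversion (simplified; the library's sign handling may cover more edge cases):

    position_str = "-112:30:36"
    deg, minutes, sec = [float(x) for x in position_str.split(":")]
    sign = -1 if deg < 0 else 1
    position_value = sign * (abs(deg) + minutes / 60.0 + sec / 3600.0)
    print(position_value)   # -(112 + 30/60 + 36/3600) = -112.51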
 
mt_metadata/timeseries/person.py

@@ -17,6 +17,8 @@ from .standards import SCHEMA_FN_PATHS
 
 # =============================================================================
 attr_dict = get_schema("person", SCHEMA_FN_PATHS)
+
+
 # =============================================================================
 class Person(Base):
     __doc__ = write_lines(attr_dict)
@@ -25,10 +27,15 @@ class Person(Base):
 
         super().__init__(attr_dict=attr_dict, **kwargs)
 
-    @property
-    def author(self):
-        return self.name
-
-    @author.setter
-    def author(self, value):
-        self.name = value
+    # @property
+    # def author(self):
+    #     if self.name is None:
+    #         return
+    #     return self.name
+
+    # @author.setter
+    # def author(self, value):
+    #     if value is None:
+    #         self.name = None
+    #     else:
+    #         self.name = value

mt_metadata/timeseries/standards/person.json

@@ -1,7 +1,7 @@
 {
     "name": {
         "type": "string",
-        "required": true,
+        "required": false,
         "style": "free form",
         "units": null,
         "description": "Persons name, should be full first and last name.",

mt_metadata/timeseries/standards/run.json

@@ -66,10 +66,10 @@
         "required": true,
         "style": "alpha numeric",
         "units": null,
-        "description": "Run ID should be station name followed by a number or character. Characters should only be used if the run number is small, if the run number is high consider using digits with zeros. For example if you have 100 runs the run ID could be 001 or {station}001.",
+        "description": "Suggested Run ID should be sample rate followed by a number or character. Characters should only be used if the run number is small, if the run number is high consider using digits with zeros. For example if you have 100 runs the run ID could be 001 or sr{sample_rate}_001.",
         "options": [],
         "alias": [],
-        "example": "001",
+        "example": "sr100_001",
         "default": null
     },
     "sample_rate": {