tectonic-utils 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tectonic_utils/.DS_Store +0 -0
- tectonic_utils/__init__.py +3 -0
- tectonic_utils/cover_picture.png +0 -0
- tectonic_utils/geodesy/.DS_Store +0 -0
- tectonic_utils/geodesy/.ruff_cache/.gitignore +1 -0
- tectonic_utils/geodesy/.ruff_cache/0.1.5/15663111236935520357 +0 -0
- tectonic_utils/geodesy/.ruff_cache/CACHEDIR.TAG +1 -0
- tectonic_utils/geodesy/__init__.py +0 -0
- tectonic_utils/geodesy/datums.py +156 -0
- tectonic_utils/geodesy/euler_pole.py +170 -0
- tectonic_utils/geodesy/fault_vector_functions.py +383 -0
- tectonic_utils/geodesy/haversine.py +193 -0
- tectonic_utils/geodesy/insar_vector_functions.py +285 -0
- tectonic_utils/geodesy/linear_elastic.py +231 -0
- tectonic_utils/geodesy/test/.DS_Store +0 -0
- tectonic_utils/geodesy/test/__init__.py +0 -0
- tectonic_utils/geodesy/test/test_conversion_functions.py +74 -0
- tectonic_utils/geodesy/test/test_euler_poles.py +33 -0
- tectonic_utils/geodesy/test/test_insar_vector_functions.py +36 -0
- tectonic_utils/geodesy/utilities.py +47 -0
- tectonic_utils/geodesy/xyz2llh.py +220 -0
- tectonic_utils/read_write/.DS_Store +0 -0
- tectonic_utils/read_write/.ruff_cache/.gitignore +1 -0
- tectonic_utils/read_write/.ruff_cache/0.1.5/680373307893520726 +0 -0
- tectonic_utils/read_write/.ruff_cache/CACHEDIR.TAG +1 -0
- tectonic_utils/read_write/__init__.py +0 -0
- tectonic_utils/read_write/general_io.py +55 -0
- tectonic_utils/read_write/netcdf_read_write.py +382 -0
- tectonic_utils/read_write/read_kml.py +68 -0
- tectonic_utils/read_write/test/.DS_Store +0 -0
- tectonic_utils/read_write/test/__init__.py +0 -0
- tectonic_utils/read_write/test/example_grd.grd +0 -0
- tectonic_utils/read_write/test/test_conversion_functions.py +40 -0
- tectonic_utils/read_write/test/written_example.grd +0 -0
- tectonic_utils/seismo/.DS_Store +0 -0
- tectonic_utils/seismo/.ruff_cache/.gitignore +1 -0
- tectonic_utils/seismo/.ruff_cache/0.1.5/12911000862714636977 +0 -0
- tectonic_utils/seismo/.ruff_cache/CACHEDIR.TAG +1 -0
- tectonic_utils/seismo/MT_calculations.py +132 -0
- tectonic_utils/seismo/__init__.py +0 -0
- tectonic_utils/seismo/moment_calculations.py +44 -0
- tectonic_utils/seismo/second_focal_plane.py +138 -0
- tectonic_utils/seismo/test/.DS_Store +0 -0
- tectonic_utils/seismo/test/__init__.py +0 -0
- tectonic_utils/seismo/test/test_WC.py +19 -0
- tectonic_utils/seismo/test/test_second_focal_plane.py +16 -0
- tectonic_utils/seismo/wells_and_coppersmith.py +167 -0
- tectonic_utils-0.1.2.dist-info/LICENSE.md +21 -0
- tectonic_utils-0.1.2.dist-info/METADATA +82 -0
- tectonic_utils-0.1.2.dist-info/RECORD +51 -0
- tectonic_utils-0.1.2.dist-info/WHEEL +4 -0
@@ -0,0 +1,382 @@
|
|
1
|
+
"""
|
2
|
+
Netcdf reading and writing functions.
|
3
|
+
Only Netcdf3 and Netcdf4 files with PIXEL NODE REGISTRATION are valid.
|
4
|
+
The assumption is 2D Netcdf files with 3 variables, in x-y-z order.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import numpy as np
|
8
|
+
import scipy.io.netcdf as netcdf
|
9
|
+
import subprocess
|
10
|
+
from netCDF4 import Dataset
|
11
|
+
|
12
|
+
|
13
|
+
# --------------- READING ------------------- #
|
14
|
+
|
15
|
+
def parse_pixelnode_registration(filename):
    """Ensure pixel node registration for netcdf file.

    :param filename: name of file
    :type filename: string
    :raises ValueError: if the grid is not pixel-node registered
    """
    # Ask GMT for the grid header; list form with shell=False avoids shell injection.
    output = subprocess.check_output(['gmt', 'grdinfo', filename], shell=False)
    # Bugfix: the original used `assert cond, ValueError(...)`, which (a) is stripped
    # entirely under `python -O` and (b) raised AssertionError with the ValueError as
    # its message instead of raising the ValueError itself. Raise explicitly.
    if "Pixel node registration used" not in str(output):
        raise ValueError("ERROR! " + filename + " not pixel-node registered")
    return
|
24
|
+
|
25
|
+
|
26
|
+
def properly_parse_three_variables(key1, key2, key3):
    """
    Set proper ordering for known keys in a netcdf file. Options: [x, y, z]; [lon, lat, z]; [longitude, latitude, z].

    :param key1: names of netcdf variable key
    :type key1: string
    :param key2: names of netcdf variable key
    :type key2: string
    :param key3: names of netcdf variable key
    :type key3: string
    :returns: ordered keys
    :rtype: list
    """
    remaining = [key1, key2, key3]
    # Recognize the x-axis key from the known aliases, in priority order.
    xkey = next((k for k in ('x', 'lon', 'longitude') if k in remaining), None)
    if xkey is None:
        raise Exception("Xkey not determined")
    # Recognize the y-axis key the same way.
    ykey = next((k for k in ('y', 'lat', 'latitude') if k in remaining), None)
    if ykey is None:
        raise Exception("Ykey not determined")
    # Whatever is left over must be the z key.
    remaining.remove(xkey)
    remaining.remove(ykey)
    if len(remaining) != 1:
        raise Exception("Zkey not determined")
    return [xkey, ykey, remaining[0]]
|
65
|
+
|
66
|
+
|
67
|
+
def read_netcdf3(filename):
    """
    A netcdf3 reading function for pixel-node registered files with recognized key patterns.

    :param filename: name of netcdf3 file
    :type filename: string
    :returns: [xdata, ydata, zdata]
    :rtype: list of 3 np.ndarrays
    """
    print("Reading file %s " % filename)
    file = netcdf.netcdf_file(filename, 'r')
    parse_pixelnode_registration(filename)
    if len(file.variables.keys()) == 6:
        # Use a gdal parsing: ['x_range', 'y_range', 'z_range', 'spacing', 'dimension', 'z']
        xinc = float(file.variables['spacing'][0])
        yinc = float(file.variables['spacing'][1])
        xstart = float(file.variables['x_range'][0]) + xinc/2  # pixel-node-registered
        xfinish = float(file.variables['x_range'][1])
        xdata0 = np.arange(xstart, xfinish, xinc)
        # Bugfix: the y-axis half-pixel shift used xinc/2 in the original; the
        # y-axis must be shifted by half of the *y* increment.
        ystart = float(file.variables['y_range'][0]) + yinc/2  # pixel-node-registered
        yfinish = float(file.variables['y_range'][1])
        ydata0 = np.arange(ystart, yfinish, yinc)
        zdata0 = file.variables['z'][:].copy()
        # gdal-style files store z flattened; reshape and flip to north-up orientation.
        zdata0 = np.flipud(np.reshape(zdata0, (len(ydata0), len(xdata0))))
    else:
        # Use a standard parsing: 3 keys, in lon/lat/z or x/y/z order
        [xkey, ykey, zkey] = file.variables.keys()
        [xkey, ykey, zkey] = properly_parse_three_variables(xkey, ykey, zkey)
        xdata0 = file.variables[xkey][:].copy()
        ydata0 = file.variables[ykey][:].copy()
        zdata0 = file.variables[zkey][::].copy()
    return [xdata0, ydata0, zdata0]
|
99
|
+
|
100
|
+
|
101
|
+
def read_netcdf4(filename):
    """
    A netcdf4 reading function for pixel-node registered files with recognized key patterns.

    :param filename: name of netcdf4 file
    :type filename: string
    :returns: [xdata, ydata, zdata]
    :rtype: list of 3 np.ndarrays
    """
    print("Reading file %s " % filename)
    rootgrp = Dataset(filename, "r")
    parse_pixelnode_registration(filename)
    if len(rootgrp.variables.keys()) == 6:
        # Use a gdal parsing: ['x_range', 'y_range', 'z_range', 'spacing', 'dimension', 'z']
        xinc = float(rootgrp.variables['spacing'][0])
        yinc = float(rootgrp.variables['spacing'][1])
        xstart = float(rootgrp.variables['x_range'][0]) + xinc/2  # pixel-node-registered
        xfinish = float(rootgrp.variables['x_range'][1])
        xvar = np.arange(xstart, xfinish, xinc)
        # Bugfix: the y-axis half-pixel shift used xinc/2 in the original; the
        # y-axis must be shifted by half of the *y* increment.
        ystart = float(rootgrp.variables['y_range'][0]) + yinc/2  # pixel-node-registered
        yfinish = float(rootgrp.variables['y_range'][1])
        yvar = np.arange(ystart, yfinish, yinc)
        zvar = rootgrp.variables['z'][:].copy()
        # gdal-style files store z flattened; reshape and flip to north-up orientation.
        zvar = np.flipud(np.reshape(zvar, (len(yvar), len(xvar))))
    else:
        # Standard parsing: 3 keys, in lon/lat/z or x/y/z order
        [xkey, ykey, zkey] = rootgrp.variables.keys()
        [xkey, ykey, zkey] = properly_parse_three_variables(xkey, ykey, zkey)
        xvar = rootgrp.variables[xkey][:]
        yvar = rootgrp.variables[ykey][:]
        zvar = rootgrp.variables[zkey][:, :]
    return [xvar, yvar, zvar]
|
132
|
+
|
133
|
+
|
134
|
+
def read_any_grd(filename):
    """
    A general netcdf4/netcdf3 reading function for pixel-node registered files with recognized key patterns.

    :param filename: name of file
    :type filename: string
    :returns: [xdata, ydata, zdata]
    :rtype: list of 3 np.ndarrays
    """
    # Attempt the netcdf3 reader first; a TypeError indicates a netcdf4 file.
    try:
        return read_netcdf3(filename)
    except TypeError:
        return read_netcdf4(filename)
|
148
|
+
|
149
|
+
|
150
|
+
def give_metrics_on_grd(filename):
    """Print shape, min/max, and NaN metrics on a netcdf grid file.

    :param filename: name of grd file
    :type filename: string
    """
    zdata = read_any_grd(filename)[2]
    nrows, ncols = np.shape(zdata)
    num_nans = np.count_nonzero(np.isnan(zdata))
    num_total = nrows * ncols
    print("Shape of %s is [%d, %d]" % (filename, nrows, ncols))
    print("Min data is %f " % (np.nanmin(zdata)))
    print("Max data is %f " % (np.nanmax(zdata)))
    print("Nans: %d of %d pixels are nans (%.3f percent)" % (num_nans, num_total, num_nans / num_total * 100))
    return
|
164
|
+
|
165
|
+
|
166
|
+
def read_3D_netcdf(filename):
    """
    Reading function for 3D netcdf pixel-node registered files with key pattern 't, x, y, z'.

    :param filename: name of netcdf file
    :type filename: string
    :returns: [tdata, xdata, ydata, zdata]
    :rtype: list of 4 np.ndarrays
    """
    reader = netcdf.netcdf_file(filename, 'r')
    variables = reader.variables
    # Copy each array so the data outlives the mmap'd file object.
    return [variables['t'][:].copy(), variables['x'][:].copy(),
            variables['y'][:].copy(), variables['z'][:, :, :].copy()]
|
181
|
+
|
182
|
+
|
183
|
+
# --------------- WRITING ------------------- #
|
184
|
+
|
185
|
+
|
186
|
+
def write_temp_output_txt(z, outfile):
    """A helper function for dumping grid data into pixel-node-registered grd files.

    Despite the name, this writes *raw binary* (the array's native byte buffer),
    flattened row-major, ready for `gmt xyz2grd -ZBL`.

    :param z: 2D array of floats
    :param outfile: string, filename
    """
    (rows, cols) = np.shape(z)
    flat = np.reshape(z, (rows * cols,))
    # Bugfix: use a context manager so the handle is closed even if the write
    # fails (the original left the file open on error). Dead commented-out
    # text-mode code removed.
    with open(outfile, 'w+b') as ofile:  # binary file
        ofile.write(bytearray(flat))
    print("writing temporary outfile %s " % outfile)
    return
|
203
|
+
|
204
|
+
|
205
|
+
def write_netcdf4(x, y, z, outfile, precision=10):
    """
    Writing PIXEL NODE registered netcdf4 file from numpy array.
    Internal strategy: send out to a binary file and make GMT convert to netcdf.
    Note: Precision of 10 (or something high) ensures better performance for higher latitudes, like >60°.

    :param x: 1D array of floats
    :param y: 1D array of floats
    :param z: 2D array of floats
    :param outfile: filename, string
    :param precision: how many decimal places for the x-inc and y-inc? Use higher precision for high latitudes.
    """
    print("writing outfile %s " % outfile)
    outtxt = outfile+'.xyz'
    write_temp_output_txt(z, outtxt)  # dump z as a raw binary buffer for xyz2grd -Z
    # Increments are taken from the first interval only; assumes evenly spaced axes.
    xinc = np.round(x[1] - x[0], precision)
    yinc = np.round(y[1] - y[0], precision)
    xmin = np.round(np.min(x)-xinc/2, precision)  # for the half-pixel outside the edge
    xmax = np.round(np.max(x)+xinc/2, precision)  # writing pixel-node reg. from Python's netcdf into .grd files
    ymin = np.round(np.min(y)-yinc/2, precision)  # writing pixel-node reg. from Python's netcdf into .grd files
    ymax = np.round(np.max(y)+yinc/2, precision)  # writing pixel-node reg. from Python's netcdf into .grd files
    increments = str(xinc)+'/'+str(yinc)
    region = str(xmin)+'/'+str(xmax)+'/'+str(ymin)+'/'+str(ymax)
    # Choose the GMT binary-format flag to match the byte width of the dumped buffer.
    if isinstance(z[0][0], np.float64):
        binary_format_flags = '-ZBLd'  # double precision floating point number, standard numpy float
    else:
        binary_format_flags = '-ZBLf'  # 4-byte floating point number
    # The command string below is printed for the user's information only;
    # the actual call uses the list form of subprocess with shell=False.
    command = 'gmt xyz2grd '+outtxt+' -G'+outfile+' -I'+increments+' -R'+region+' '+binary_format_flags +\
              ' -r -fg -di-9999 '
    print(command)
    subprocess.call(['gmt', 'xyz2grd', outtxt, '-G'+outfile, '-I'+increments, '-R'+region, binary_format_flags, '-r',
                     '-fg', '-di-9999'], shell=False)
    subprocess.call(['rm', outtxt], shell=False)  # clean up the temporary binary file
    return
|
239
|
+
|
240
|
+
|
241
|
+
def produce_output_netcdf(xdata, ydata, zdata, zunits, netcdfname, dtype=float):
    """
    Write netcdf3 grid file.
    NOTE: The pixel vs gridline registration of this function is not guaranteed;
    depends on file system and float type and precision :(.
    Safer to use write_netcdf4().
    """
    print("Writing output netcdf to file %s " % netcdfname)
    f = netcdf.netcdf_file(netcdfname, 'w')
    f.history = 'Created for a test'
    f.createDimension('x', len(xdata))
    f.createDimension('y', len(ydata))
    print(np.shape(zdata))
    xvar = f.createVariable('x', dtype, ('x',))
    xvar[:] = xdata
    xvar.units = 'range'
    yvar = f.createVariable('y', dtype, ('y',))
    yvar[:] = ydata
    yvar.units = 'azimuth'
    zvar = f.createVariable('z', dtype, ('y', 'x',))
    zvar[:, :] = zdata
    zvar.units = zunits
    f.close()
    flip_if_necessary(netcdfname)  # repair a reversed axis if GMT reports a negative increment
    return
|
266
|
+
|
267
|
+
|
268
|
+
def flip_if_necessary(filename):
    """If netcdf3 file is stored with xinc or yinc backwards, we replace with a copy that flips the affected axis.

    Queries GMT's grdinfo for the x- and y-increments, and if either is negative,
    re-reads the grid and rewrites it via produce_output_netcdf(), then re-queries
    to report the new increment.

    :param filename: name of file
    :type filename: string
    """
    # NOTE(review): shell=True with string concatenation; safe only because filename
    # comes from this library's own write path — confirm no untrusted input reaches here.
    xinc = subprocess.check_output('gmt grdinfo -M -C ' + filename + ' | awk \'{print $8}\'',
                                   shell=True)  # x-increment
    yinc = subprocess.check_output('gmt grdinfo -M -C ' + filename + ' | awk \'{print $9}\'',
                                   shell=True)  # y-increment
    xinc = float(xinc.split()[0])
    yinc = float(yinc.split()[0])

    if xinc < 0:  # FLIP THE X-AXIS
        print("flipping the x-axis")
        [xdata, ydata] = read_netcdf3(filename)[0:2]
        data = read_netcdf3(filename)[2]
        # This is the key! Flip the x-axis when necessary.
        # xdata=np.flip(xdata,0); # This is sometimes necessary and sometimes not! Not sure why.
        # NOTE(review): units are hard-coded to 'mm/yr' on rewrite, regardless of the
        # grid's original units — confirm this is acceptable for all callers.
        produce_output_netcdf(xdata, ydata, data, 'mm/yr', filename)
        xinc = subprocess.check_output('gmt grdinfo -M -C ' + filename + ' | awk \'{print $8}\'',
                                       shell=True)  # x-increment
        xinc = float(xinc.split()[0])
        print("New xinc is: %f " % xinc)
    if yinc < 0:
        print("flipping the y-axis")
        [xdata, ydata] = read_netcdf3(filename)[0:2]
        data = read_netcdf3(filename)[2]
        # Flip the y-axis when necessary.
        # ydata=np.flip(ydata,0);
        produce_output_netcdf(xdata, ydata, data, 'mm/yr', filename)
        yinc = subprocess.check_output('gmt grdinfo -M -C ' + filename + ' | awk \'{print $9}\'',
                                       shell=True)  # y-increment
        yinc = float(yinc.split()[0])
        print("New yinc is: %f" % yinc)
    return
|
304
|
+
|
305
|
+
|
306
|
+
def produce_output_TS_grids(xdata, ydata, zdata, timearray, zunits, outfile):
    """Write many netcdf3 files, one for each step of a timearray. Each file will be named with a datetime suffix.

    :param xdata: 1D array of floats
    :param ydata: 1D array of floats
    :param zdata: 3D array of floats
    :param timearray: 1D array of anything
    :param zunits: string
    :param outfile: string, filename
    """
    print("Shape of zdata originally:", np.shape(zdata))
    for i in range(len(timearray)):
        # Build the 2D slice for time step i, pixel by pixel.
        zdata_slice = np.zeros([len(ydata), len(xdata)])
        for k in range(len(xdata)):
            for j in range(len(ydata)):
                # NOTE(review): zdata[j][k][0] implies each pixel holds its time
                # series wrapped in a one-element container — confirm the expected
                # zdata layout against callers.
                temp_array = zdata[j][k][0]
                zdata_slice[j][k] = temp_array[i]
        # NOTE(review): every iteration writes to the same `outfile`, so only the
        # final time slice survives; the "datetime suffix" promised in the
        # docstring never appears in the code — confirm intended behavior.
        produce_output_netcdf(xdata, ydata, zdata_slice, zunits, outfile)
    return
|
325
|
+
|
326
|
+
|
327
|
+
def produce_output_timeseries(xdata, ydata, zdata, timearray, zunits, netcdfname):
    """Write dataset with t, x, y, z into large 3D netcdf.
    Each 2D slice is the displacement at a particular time, associated with a time series.
    zdata comes in as a 2D array where each element is a timeseries (1D array), so it must be re-packaged into
    3D array before we save it.
    Broke during long SoCal experiment for some reason. f.close() didn't work.

    :param xdata: 1D array of floats
    :param ydata: 1D array of floats
    :param zdata: 3D array of floats
    :param timearray: 1D array of anything
    :param zunits: string
    :param netcdfname: string, filename
    """

    print("Shape of zdata originally:", np.shape(zdata))
    zdata_repacked = np.zeros([len(timearray), len(ydata), len(xdata)])
    print("Intended repackaged zdata of shape: ", np.shape(zdata_repacked))
    # If zdata already arrives as (t, y, x), skip the repacking step.
    if np.shape(zdata) == np.shape(zdata_repacked):
        print("No repacking necessary")
        zdata_repacked = zdata
    else:
        print("Repacking zdata into zdata_repacked")
        for i in range(len(zdata[0][0][0])):  # for each time interval:
            print(i)
            for k in range(len(xdata)):
                for j in range(len(ydata)):
                    # NOTE(review): zdata[j][k][0] implies each pixel's time series
                    # is wrapped in a one-element container — confirm against callers.
                    temp_array = zdata[j][k][0]
                    zdata_repacked[i][j][k] = temp_array[i]

    print("Writing output netcdf to file %s " % netcdfname)
    # Convert the time axis to integer days elapsed since the first epoch.
    # NOTE(review): assumes timearray elements subtract to an object with a
    # .days attribute (e.g. datetime.date / datetime.datetime) — confirm.
    days_array = []
    for i in range(len(timearray)):
        delta = timearray[i] - timearray[0]
        days_array.append(delta.days)
    f = netcdf.netcdf_file(netcdfname, 'w')
    f.history = 'Created for a test'
    f.createDimension('t', len(timearray))
    f.createDimension('x', len(xdata))
    f.createDimension('y', len(ydata))

    t = f.createVariable('t', 'i4', ('t',))
    t[:] = days_array
    t.units = 'days'
    x = f.createVariable('x', float, ('x',))
    x[:] = xdata
    x.units = 'range'
    y = f.createVariable('y', float, ('y',))
    y[:] = ydata
    y.units = 'azimuth'

    z = f.createVariable('z', float, ('t', 'y', 'x'))
    z[:, :, :] = zdata_repacked
    z.units = zunits
    f.close()
    return
|
@@ -0,0 +1,68 @@
|
|
1
|
+
|
2
|
+
from fastkml import kml
|
3
|
+
|
4
|
+
|
5
|
+
def read_simple_kml(infile):
    """
    Read a simple box drawn in Google Earth and saved as a KML file with field 'coordinates'.
    For more complicated KMLs with many elements, I'm switching to use the Python library "fastkml" for reading.

    :param infile: kml file with simple box
    :type infile: string
    :returns: lons, lats as lists that represent the coordinates of box vertices
    :rtype: list, list
    """
    print("Reading %s into arrays of lon and lat..." % infile)
    lats, lons = [], []
    start = 0
    # Bugfix: the original opened the file and never closed it; a context
    # manager guarantees the handle is released.
    with open(infile, 'r') as ifile:
        for line in ifile:
            if start == 1:
                # The coordinate block is the single line *after* the line
                # containing "coordinates"; items are "lon,lat[,alt]" tokens.
                for item in line.split():
                    lons.append(float(item.split(',')[0]))
                    lats.append(float(item.split(',')[1]))
                break
            if "coordinates" in line:
                start = 1
    return lons, lats
|
29
|
+
|
30
|
+
|
31
|
+
def read_kml_geometries_into_coord_lists(kml_name):
    """
    Iteratively traverse the etree inside a kml file, looking for things that have 'geometry'.
    This includes Placemarks: i.e., Point, LineString, or Polygon objects
    If they do have 'geometry', then we can print their coordinates and return them as lists.
    An attempt to create a GMT-compatible fault format from a KML

    :param kml_name: string, name of file
    :return: list of tuples or lists, containing coordinates of the drawn points
    """
    coord_lists = []

    with open(kml_name, "rb") as handle:
        raw_doc = handle.read()

    tree = kml.KML()
    tree.from_string(raw_doc)

    def _visit(node, level=0):
        # Print a small indented summary of every feature we walk past.
        pad = ' ' * level
        print(f"{pad}- {type(node).__name__}: {getattr(node, 'name', None)}")

        if hasattr(node, 'geometry') and node.geometry:
            geometry = node.geometry
            if hasattr(geometry, 'coords'):
                coord_lists.append(list(geometry.coords))
            else:
                coord_lists.append(geometry)

        # Recurse into any container feature (Document, Folder, ...).
        if hasattr(node, 'features'):
            for child in node.features():
                _visit(child, level + 1)

    # Start from the root object
    for root_feature in tree.features():
        _visit(root_feature)

    return coord_lists
|
Binary file
|
File without changes
|
Binary file
|
@@ -0,0 +1,40 @@
|
|
1
|
+
# Testing code
|
2
|
+
|
3
|
+
# Testing code imports: stdlib, third-party, then local package.
import os
import subprocess
import unittest

import numpy as np

from .. import netcdf_read_write
|
7
|
+
|
8
|
+
|
9
|
+
class Tests(unittest.TestCase):

    def test_pixel_node_writer(self):
        """
        See if the writing function for pixel-node files produces a pixel-node file.
        The behavior has been finicky for float32 vs float64
        Writing a full test for float32 would be good (although the example grd file gets pretty close)
        """
        grid_def = [-120, -114, 32, 37]  # [W, E, S, N]
        inc = [0.02, 0.02]
        filename = 'test_outfile.nc'
        lons = np.arange(grid_def[0], grid_def[1] + 0.00001, inc[0])
        lats = np.arange(grid_def[2], grid_def[3] + 0.00001, inc[1])

        # Test a write function
        grid = np.zeros((len(lats), len(lons)))
        netcdf_read_write.write_netcdf4(lons, lats, grid, filename)
        netcdf_read_write.parse_pixelnode_registration(filename)
        subprocess.call(['rm', filename], shell=False)
        subprocess.call(['rm', 'gmt.history'], shell=False)

        # Test a read-write cycle on an example grid.
        # Bugfix: resolve the example files relative to this test module. The old
        # hard-coded "Tectonic_Utils/..." paths do not match the installed
        # "tectonic_utils" package name and only worked from one working directory.
        test_dir = os.path.dirname(os.path.abspath(__file__))
        example_grd = os.path.join(test_dir, 'example_grd.grd')
        written_grd = os.path.join(test_dir, 'written_example.grd')
        [x, y, z] = netcdf_read_write.read_any_grd(example_grd)
        netcdf_read_write.write_netcdf4(x, y, z, written_grd)
        netcdf_read_write.parse_pixelnode_registration(written_grd)
        subprocess.call(['rm', 'gmt.history'], shell=False)

        return
|
37
|
+
|
38
|
+
|
39
|
+
if __name__ == "__main__":
    unittest.main()  # allow running this test module directly as a script
|
Binary file
|
Binary file
|
@@ -0,0 +1 @@
|
|
1
|
+
*
|
Binary file
|
@@ -0,0 +1 @@
|
|
1
|
+
Signature: 8a477f597d28d172789f06886806bc55
|
@@ -0,0 +1,132 @@
|
|
1
|
+
"""
|
2
|
+
Calculations that deal with seismic moment tensors.
|
3
|
+
Notes from Lay and Wallace Chapter 8:
|
4
|
+
|
5
|
+
* Decomposition 1: Mij = isotropic + deviatoric
|
6
|
+
* Decomposition 2: Mij = isotropic + 3 vector dipoles
|
7
|
+
* Decomposition 3: Mij = isotropic + 3 double couples
|
8
|
+
* Decomposition 4: Mij = isotropic + 3 CLVDs
|
9
|
+
* Decomposition 5: Mij = isotropic + major DC + minor DC
|
10
|
+
* Decomposition 6: Mij = isotropic + DC + CLVD
|
11
|
+
|
12
|
+
The most useful in practice are Decomposition 1 and Decomposition 6.
|
13
|
+
"""
|
14
|
+
|
15
|
+
import numpy as np
|
16
|
+
|
17
|
+
def get_MT(mrr, mtt, mpp, mrt, mrp, mtp):
    """
    Build a symmetric 3x3 matrix from the six independent components of the moment tensor.

    :returns: np.array, 3x3 matrix
    """
    top = [mrr, mrt, mrp]
    middle = [mrt, mtt, mtp]
    bottom = [mrp, mtp, mpp]
    return np.array([top, middle, bottom])
|
25
|
+
|
26
|
+
|
27
|
+
def diagonalize_MT(MT):
    """
    Build a diagonal matrix whose elements are the ordered (descending) eigenvalues of original matrix MT.

    :returns: np.array, 3x3 matrix
    """
    values, _vectors = np.linalg.eig(MT)  # eigenvectors are not needed here
    ordered = sorted(values, reverse=True)
    return np.diag(ordered)
|
36
|
+
|
37
|
+
|
38
|
+
def get_deviatoric_MT(MT):
    """
    Get deviatoric MT from a full MT, i.e. the tensor remaining after the isotropic part is removed.

    :returns: np.array, 3x3 matrix
    """
    isotropic_part = get_iso_MT(MT)
    return np.subtract(MT, isotropic_part)
|
47
|
+
|
48
|
+
|
49
|
+
def get_iso_MT(MT):
    """
    Calculate the isotropic moment tensor from a full MT: one third of the trace, spread on the diagonal.

    :returns: np.array, 3x3 matrix
    """
    scale = (1 / 3) * np.trace(MT)
    return np.multiply(np.eye(3), scale)
|
58
|
+
|
59
|
+
|
60
|
+
def get_clvd_dc_from_deviatoric_MT(MT):
    """
    Calculate the dc and clvd components of a deviatoric MT, from Shearer Equation 9.14.

    :param MT: np.array, 3x3 diagonalized deviatoric moment tensor, eigenvalues in descending order
    :returns: two np.arrays, each 3x3 matrix (M_clvd, M_dc)
    :raises ValueError: if the diagonal is not sorted in descending order
    """
    eigenvalues = np.diag(MT)
    # Bugfix: the original used `assert cond, ValueError(...)`, which is stripped
    # under `python -O` and raised AssertionError instead of ValueError. It also
    # required strictly decreasing eigenvalues, rejecting legitimate degenerate
    # tensors (e.g. a pure CLVD has two equal eigenvalues); allow ties.
    if not (eigenvalues[0] >= eigenvalues[1] >= eigenvalues[2]):
        raise ValueError("Deviatoric eigenvalues out of order.")
    dc_component = (1/2)*(eigenvalues[0]-eigenvalues[2])
    clvd_component = eigenvalues[1]*(1/2)
    M_dc = np.diag([dc_component, 0, -dc_component])
    M_clvd = np.diag([-clvd_component, 2*clvd_component, -clvd_component])
    return M_clvd, M_dc
|
73
|
+
|
74
|
+
|
75
|
+
def decompose_iso_dc_clvd(MT):
    """
    Decompose a full moment tensor into an isotropic part, a double-couple, and a CLVD component.

    :returns: three np.arrays, each 3x3 matrix (M_iso, M_clvd, M_dc)
    """
    rotated = diagonalize_MT(MT)  # equivalent to a coordinate transformation
    M_iso = get_iso_MT(rotated)  # isotropic part, from the trace
    deviatoric = get_deviatoric_MT(rotated)
    deviatoric = diagonalize_MT(deviatoric)  # re-ordered after the subtraction
    M_clvd, M_dc = get_clvd_dc_from_deviatoric_MT(deviatoric)
    return M_iso, M_clvd, M_dc
|
87
|
+
|
88
|
+
|
89
|
+
# def get_separate_scalar_moments(MT):
|
90
|
+
# """return isotropic, clvd, and double couple moments. Not frequently used."""
|
91
|
+
# M_iso, M_clvd, M_dc = decompose_iso_dc_clvd(MT);
|
92
|
+
# iso_moment = abs(M_iso[0][0]);
|
93
|
+
# clvd_moment = abs(M_clvd[0][0]);
|
94
|
+
# dc_moment = abs(M_dc[0][0]);
|
95
|
+
# return iso_moment, clvd_moment, dc_moment;
|
96
|
+
|
97
|
+
|
98
|
+
def get_total_scalar_moment(MT):
    """
    Shearer Equation 9.8: quadratic sum of the moment tensor components, in newton-meters.

    :param MT: np.array, 3x3 matrix
    :returns: Mo, scalar moment
    :rtype: float
    """
    scaled = np.divide(MT, 1e16)  # rescale so the squared terms stay well within float range
    # Vectorized sum of squares replaces the original hand-rolled double loop.
    total = np.sum(np.multiply(scaled, scaled))
    Mo = (1 / np.sqrt(2)) * np.sqrt(total)
    Mo = np.multiply(Mo, 1e16)  # undo the rescaling
    return Mo
|
114
|
+
|
115
|
+
|
116
|
+
def get_percent_double_couple(MT):
    """
    Get the percent double couple and percent clvd moment from a deviatoric moment tensor.
    When isotropic term is involved, this can get more complicated and there are several approaches.
    See Shearer equation 9.17 for epsilon.
    See Vavrycuk, 2001 for other approaches when isotropic component is involved.

    :param MT: np.array, 3x3 matrix
    :returns: percentage double couple, percentage CLVD
    :rtype: float, float
    """
    deviatoric_eigs = np.diag(diagonalize_MT(get_deviatoric_MT(MT)))
    # epsilon = middle eigenvalue over the larger of |lambda_1|, |lambda_3|.
    largest = np.max([np.abs(deviatoric_eigs[0]), np.abs(deviatoric_eigs[2])])
    epsilon = deviatoric_eigs[1] / largest
    perc_clvd = 100 * (abs(epsilon * 2))
    perc_dc = 100 - perc_clvd
    return perc_dc, perc_clvd
|
File without changes
|
@@ -0,0 +1,44 @@
|
|
1
|
+
|
2
|
+
"""Utilities that convert between moment and magnitude, etc. """
|
3
|
+
|
4
|
+
import numpy as np
|
5
|
+
|
6
|
+
|
7
|
+
def moment_from_muad(mu, A, d):
    """moment = mu * A * d.

    :param mu: shear modulus, in Pa
    :type mu: float
    :param A: area, in m^2
    :type A: float
    :param d: slip, in m
    :type d: float
    :returns: moment, in Newton-meters
    :rtype: float
    """
    moment = mu * A
    moment = moment * d
    return moment
|
20
|
+
|
21
|
+
def mw_from_moment(moment):
    """Definition of moment magnitude from Hanks and Kanamori (1979). Takes newton-meters, returns moment magnitude.

    :param moment: moment in Newton-meters
    :type moment: float
    :returns: magnitude
    :rtype: float
    """
    dyne_cm = moment * 1e7  # convert to dyne-cm
    return (2 / 3) * np.log10(dyne_cm) - 10.7
|
32
|
+
|
33
|
+
def moment_from_mw(Mw):
    """Definition of moment magnitude from Hanks and Kanamori (1979). Takes magnitude, returns moment in newton-meters.

    :param Mw: moment magnitude
    :type Mw: float
    :returns: moment in Newton-meters
    :rtype: float
    """
    exponent = 1.5 * Mw + 1.5 * 10.7
    dyne_cm = np.power(10, exponent)
    return dyne_cm * 1e-7  # dyne-cm -> Newton-meters
|