pyckster-26.1.2-py3-none-any.whl → pyckster-26.1.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyckster/__init__.py +2 -2
- pyckster/core.py +3041 -607
- pyckster/pick_io.py +13 -3
- pyckster/sw_utils.py +199 -0
- {pyckster-26.1.2.dist-info → pyckster-26.1.4.dist-info}/METADATA +13 -5
- {pyckster-26.1.2.dist-info → pyckster-26.1.4.dist-info}/RECORD +10 -11
- pyckster/pac_inversion.py +0 -785
- {pyckster-26.1.2.dist-info → pyckster-26.1.4.dist-info}/WHEEL +0 -0
- {pyckster-26.1.2.dist-info → pyckster-26.1.4.dist-info}/entry_points.txt +0 -0
- {pyckster-26.1.2.dist-info → pyckster-26.1.4.dist-info}/licenses/LICENCE +0 -0
- {pyckster-26.1.2.dist-info → pyckster-26.1.4.dist-info}/top_level.txt +0 -0
pyckster/pac_inversion.py
DELETED
@@ -1,785 +0,0 @@
-
-"""
-Author : José CUNHA TEIXEIRA
-Affiliation : SNCF Réseau, UMR 7619 METIS (Sorbonne University), Mines Paris - PSL
-License : Creative Commons Attribution 4.0 International
-Date : Feb 4, 2025
-"""
-
-import os
-import sys
-import json
-import argparse
-import numpy as np
-from time import time
-from disba import PhaseDispersion
-import bayesbay as bb
-from bayesbay import State
-from bayesbay._state import ParameterSpaceState
-from bayesbay.likelihood import LogLikelihood
-from scipy.interpolate import interp1d
-from disba import DispersionError
-
-import arviz as az
-import matplotlib.pyplot as plt
-import matplotlib.colors as colors
-
-sys.path.append("./modules/")
-from misc import arange
-from display import display_dispersion_img
-
-
-
-FONT_SIZE = 9
-plt.rcParams.update({'font.size': FONT_SIZE})
-CM = 1/2.54
-
-VP_VS = 1.77
-
-tic = time()
-
-
-
-### FUNCTIONS -------------------------------------------------------------------------------------
-class CustomParametrization(bb.parameterization.Parameterization):
-    def __init__(self, param_space, modes, fs_per_mode):
-        super().__init__(param_space)
-        self.modes = modes
-        self.fs_per_mode = fs_per_mode
-
-    def initialize(self):
-        param_values = dict()
-        for ps_name, ps in self.parameter_spaces.items():
-            param_values[ps_name] = self.initialize_param_space(ps)
-        return State(param_values)
-
-    def initialize_param_space(self, param_space):
-        unstable = True
-        while unstable:
-            vs_vals = []
-            thick_vals = []
-            for name, param in param_space.parameters.items():
-                vmin, vmax = param.get_vmin_vmax(None)
-                if 'vs' in name:
-                    vs_vals.append(np.random.uniform(vmin, vmax))
-                elif 'thick' in name:
-                    thick_vals.append(np.random.uniform(vmin, vmax))
-            vs_vals = np.sort(vs_vals)
-            thick_vals = np.sort(thick_vals)
-            vp_vals = vs_vals * VP_VS
-            rho_vals = 0.32 * vp_vals + 0.77*1000
-            velocity_model = np.column_stack((np.append(thick_vals, 1000), vp_vals, vs_vals, rho_vals))
-            velocity_model /= 1000 # m to km and kg/m^3 to g/cm^3
-            try:
-                for mode, fs in zip(self.modes, self.fs_per_mode):
-                    pd = PhaseDispersion(*velocity_model.T)
-                    periods = 1 / fs[::-1]
-                    d_pred = pd(periods, mode=mode, wave="rayleigh").velocity
-                    if d_pred.shape[0] != periods.shape[0]: # Test if the dispersion curve is too short - It is often the case for low velocities (i.e. high periods) on superior modes
-                        raise DispersionError(f"Dispersion curve length for mode {mode} is not the same as the observed one")
-                unstable = False
-                print(f'ID {ID} | x_mid {x_mid} | Found stable initialisation')
-            except DispersionError:
-                unstable = True
-        vals = np.concatenate((vs_vals, thick_vals))
-        param_values = dict()
-        for i, name in enumerate(param_space.parameters.keys()):
-            param_values[name] = np.array([vals[i]])
-        return ParameterSpaceState(1, param_values)
-
-def forward_model_disba(thick_vals, vs_vals, mode, fs):
-    vp_vals = vs_vals * VP_VS
-    rho_vals = 0.32 * vp_vals + 0.77*1000
-    velocity_model = np.column_stack((thick_vals, vp_vals, vs_vals, rho_vals))
-    velocity_model /= 1000 # m to km and kg/m^3 to g/cm^3
-    pd = PhaseDispersion(*velocity_model.T)# dc=0.000005)
-    periods = 1 / fs[::-1] # Hz to s and reverse
-    pd = pd(periods, mode=mode, wave="rayleigh")
-    vr = pd.velocity
-    if pd.period.shape[0] < periods.shape[0]: # If the dispersion curve is too short - It is often the case for low velocities (i.e. high periods) on superior modes
-        vr = np.append(vr, [np.nan]*(periods.shape[0] - pd.period.shape[0]))
-    vr = vr[::-1]*1000 # km/s to m/s and over frequencies
-    return vr
-
-def fwd_function(state, mode, fs):
-    vs_vals = [state["space"][f"vs{i+1}"][0] for i in range(nb_layers)]
-    thick_vals = [state["space"][f"thick{i+1}"][0] for i in range(nb_layers-1)]
-    thick_vals.append(1000)
-    vs_vals = np.array(vs_vals)
-    thick_vals = np.array(thick_vals)
-    vr = forward_model_disba(thick_vals, vs_vals, mode, fs)
-    return vr
-### -----------------------------------------------------------------------------------------------
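For context, `forward_model_disba` above follows disba's calling convention: `PhaseDispersion` takes per-layer thickness, Vp, Vs and density columns in km, km/s and g/cm³, and the curve is evaluated at periods in seconds in ascending order, hence the m→km scaling and the `fs[::-1]` reversals. A minimal sketch of that round trip, with a hypothetical two-layer model (illustrative values only):

import numpy as np
from disba import PhaseDispersion

# Hypothetical two-layer model in SI units (m, m/s, kg/m^3)
thick = np.array([5.0, 1000.0])   # large final thickness stands in for the half-space
vs = np.array([200.0, 400.0])
vp = vs * 1.77                    # fixed Vp/Vs ratio, as in VP_VS above
rho = 0.32 * vp + 0.77 * 1000     # empirical density relation used by the script

model = np.column_stack((thick, vp, vs, rho)) / 1000  # disba wants km, km/s, g/cm^3
pd = PhaseDispersion(*model.T)

fs = np.linspace(5.0, 50.0, 46)   # frequencies in Hz, ascending
periods = 1 / fs[::-1]            # periods in s, ascending
cp = pd(periods, mode=0, wave="rayleigh")
vr = cp.velocity[::-1] * 1000     # back to m/s, ordered by frequency
# Note: for higher modes cp may be shorter than periods; the script pads with NaN.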
-
-
-
-### ARGUMENTS -------------------------------------------------------------------------------------
-parser = argparse.ArgumentParser(description="Process an ID argument.")
-parser.add_argument("-ID", type=int, required=True, help="ID of the script")
-parser.add_argument("-r", type=str, required=True, help="Path to the folder containing the data")
-args = parser.parse_args()
-ID = f"{int(args.ID)}"
-folder_path = args.r
-### -----------------------------------------------------------------------------------------------
-
-
-
-### READ JSON -------------------------------------------------------------------------------------
-with open(f"{folder_path}/inversion_params.json", "r") as file:
-    inversion_params = json.load(file)
-
-x_mid = inversion_params["running_distribution"][ID]["x_mid"]
-
-modes = inversion_params["inversion"]["modes"]
-modes = sorted([int(mode) for mode in modes])
-
-nb_layers = inversion_params["inversion"]["nb_layers"]
-
-thickness_mins = np.array(inversion_params["inversion"]["thickness_mins"], dtype=np.float64)
-thickness_maxs = np.array(inversion_params["inversion"]["thickness_maxs"], dtype=np.float64)
-thickness_perturb_stds = np.array(inversion_params["inversion"]["thickness_perturb_stds"])
-
-vs_mins = np.array(inversion_params["inversion"]["vs_mins"], dtype=np.float64)
-vs_maxs = np.array(inversion_params["inversion"]["vs_maxs"], dtype=np.float64)
-vs_perturb_stds = np.array(inversion_params["inversion"]["vs_perturb_stds"])
-
-n_iterations = inversion_params["inversion"]["n_iterations"]
-n_burnin_iterations = inversion_params["inversion"]["n_burnin_iterations"]
-n_chains = inversion_params["inversion"]["n_chains"]
-
-
-with open(f"{folder_path}/computing_params.json", "r") as f:
-    computing_params = json.load(f)
-Nx = computing_params["MASW_length"]
-dx = computing_params["positions"][1] - computing_params["positions"][0]
-### -----------------------------------------------------------------------------------------------
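Taken together, the keys read above imply an `inversion_params.json` shaped roughly like the sketch below (illustrative placeholder values; only the keys this script actually accesses are shown, and `running_distribution` is keyed by the string form of the `-ID` argument):

# Hypothetical inversion_params.json contents, written as a Python literal
inversion_params = {
    "running_distribution": {
        "1": {"x_mid": 12.5},
    },
    "inversion": {
        "modes": [0, 1],                    # Rayleigh modes to invert
        "nb_layers": 3,
        "thickness_mins": [1.0, 1.0],       # nb_layers - 1 entries
        "thickness_maxs": [10.0, 10.0],
        "thickness_perturb_stds": [0.5, 0.5],
        "vs_mins": [100.0, 200.0, 300.0],   # nb_layers entries
        "vs_maxs": [400.0, 600.0, 800.0],
        "vs_perturb_stds": [10.0, 10.0, 10.0],
        "n_iterations": 50000,
        "n_burnin_iterations": 10000,
        "n_chains": 4,
    },
}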
-
-
-
-### READ DISPERSION CURVE -------------------------------------------------------------------------
-fs_obs_per_mode = []
-vr_obs_per_mode = []
-err_obs_per_mode = []
-existing_modes = []
-for mode in modes:
-    try:
-        data_obs = np.loadtxt(f"{folder_path}/xmid{x_mid}/pick/xmid{x_mid}_obs_M{mode}.pvc")
-    except:
-        continue
-    fs_obs_per_mode.append(data_obs[:,0])
-    vr_obs_per_mode.append(data_obs[:,1])
-    err_obs_per_mode.append(data_obs[:,2])
-    existing_modes.append(mode)
-modes = existing_modes
-### -----------------------------------------------------------------------------------------------
-
-
-
-### OUTPUT DIRECTORY ------------------------------------------------------------------------------
-output_dir = f"{folder_path}/xmid{x_mid}/inv/"
-if not os.path.exists(output_dir):
-    os.makedirs(output_dir)
-
-log_file = open(f"{output_dir}/xmid{x_mid}_output.log", "w")
-sys.stdout = log_file
-sys.stderr = log_file
-
-print(f'ID {ID} | x_mid {x_mid} | Parameters loaded')
-### -----------------------------------------------------------------------------------------------
-
-
-
-### INVERSION -------------------------------------------------------------------------------------
-# Targets
-targets = []
-for mode, err_obs, vr_obs in zip(modes, err_obs_per_mode, vr_obs_per_mode):
-    covariance_mat_inv = np.diag(1/err_obs**2)
-    target = bb.likelihood.Target(
-        name=f"rayleigh_M{mode}",
-        dobs=vr_obs,
-        covariance_mat_inv=covariance_mat_inv,
-    )
-    targets.append(target)
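Passing `covariance_mat_inv = np.diag(1/err_obs**2)` to each `Target` amounts to treating the picks of each mode as independent Gaussian data, i.e. a log-likelihood of the form (up to an additive constant)

$$\log L(\mathbf{m}) = -\frac{1}{2} \sum_{m \in \mathrm{modes}} \sum_{i} \frac{\left(d^{\mathrm{obs}}_{m,i} - g_m(\mathbf{m})_i\right)^2}{\sigma_{m,i}^2},$$

where $\sigma_{m,i}$ are the picked uncertainties from the third column of the `.pvc` files and $g_m$ is the disba forward model for mode $m$.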
-
-# Forward functions
-fwd_functions = []
-for mode, fs_obs in zip(modes, fs_obs_per_mode):
-    fwd_functions.append(lambda state, mode=mode, fs=fs_obs: fwd_function(state, mode, fs))
-
-# Log-likelihood
-log_likelihood = LogLikelihood(targets=targets, fwd_functions=fwd_functions)
-
-# Priors
-priors = []
-for i in range(nb_layers):
-    priors.append(bb.prior.UniformPrior(name=f'vs{i+1}',
-                                        vmin=vs_mins[i],
-                                        vmax=vs_maxs[i],
-                                        perturb_std=vs_perturb_stds[i]))
-for i in range(nb_layers-1):
-    priors.append(bb.prior.UniformPrior(name=f'thick{i+1}',
-                                        vmin=thickness_mins[i],
-                                        vmax=thickness_maxs[i],
-                                        perturb_std=thickness_perturb_stds[i]))
-
-# Parameter space
-param_space = bb.parameterization.ParameterSpace(
-    name="space",
-    n_dimensions=1,
-    parameters=priors,
-)
-
-# Parameterization
-parameterization = CustomParametrization(param_space, modes, fs_obs_per_mode)
-
-# Inversion
-inversion = bb.BayesianInversion(
-    log_likelihood=log_likelihood,
-    parameterization=parameterization,
-    n_chains=n_chains,
-)
-
-print(f'ID {ID} | x_mid {x_mid} | Running inversion')
-sys.stdout = sys.__stdout__
-print(f'ID {ID} | x_mid {x_mid} | Running inversion')
-sys.stdout = log_file
-
-# Run inversion
-inversion.run(
-    n_iterations=n_iterations,
-    burnin_iterations=n_burnin_iterations,
-    save_every=150,
-    verbose=False,
-)
-
-# Print statistics
-print(f'ID {ID} | x_mid {x_mid} | Inversion statistics:')
-
-for chain in inversion.chains:
-    chain.print_statistics()
-### -----------------------------------------------------------------------------------------------
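One detail in the forward-function loop above deserves a note: the default arguments `mode=mode, fs=fs_obs` are what bind each lambda to its own mode. Python closures resolve free variables late, so without the defaults every entry of `fwd_functions` would see the values from the last loop iteration. A self-contained illustration of the difference:

# Late binding vs. default-argument binding in a loop of lambdas
fns_bad = [lambda: m for m in range(3)]
fns_good = [lambda m=m: m for m in range(3)]
print([f() for f in fns_bad])   # [2, 2, 2] -- all capture the final m
print([f() for f in fns_good])  # [0, 1, 2] -- each freezes its own m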
-
-
-
-### PLOT RESULTS ----------------------------------------------------------------------------------
-depth_max = np.nansum(thickness_maxs) + 1
-dz = 0.01
-
-results = inversion.get_results(concatenate_chains=True)
-
-# Extract sampled models
-all_sampled_vs = []
-for i in range(nb_layers):
-    all_sampled_vs.append(np.array(results[f'space.vs{i+1}']).reshape(-1))
-all_sampled_vs = np.array(all_sampled_vs)
-
-all_sampled_thick = []
-for i in range(nb_layers-1):
-    all_sampled_thick.append(np.array(results[f'space.thick{i+1}']).reshape(-1))
-all_sampled_thick.append(np.ones_like(all_sampled_vs[-1]))
-all_sampled_thick = np.array(all_sampled_thick)
-
-all_sampled_gm = []
-for vs_vals, thick_vals in zip(all_sampled_vs.T, all_sampled_thick.T):
-    gm = np.column_stack((thick_vals, vs_vals*VP_VS, vs_vals, 0.32*vs_vals*VP_VS + 0.77*1000))
-    all_sampled_gm.append(gm)
-all_sampled_gm = np.array(all_sampled_gm)
-
-
-
-# Misfits
-misfits = np.zeros(len(results[f'rayleigh_M{modes[0]}.dpred']))
-for mode, vr_obs in zip(modes, vr_obs_per_mode):
-    d_pred = np.array(results[f'rayleigh_M{mode}.dpred'])
-    for i, vr_pred in enumerate(d_pred):
-        misfits[i] += np.sum((vr_obs - vr_pred)**2)
-misfits /= np.sum([len(vr_obs) for vr_obs in vr_obs_per_mode])
-misfits = np.sqrt(misfits)
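The accumulation above assigns each sampled model a root-mean-square residual over all picked modes:

$$\mathrm{misfit} = \sqrt{\frac{1}{N} \sum_{m \in \mathrm{modes}} \sum_{i} \left(v_{R,m,i}^{\mathrm{obs}} - v_{R,m,i}^{\mathrm{pred}}\right)^{2}}, \qquad N = \sum_{m} N_m,$$

with $N_m$ the number of picked points for mode $m$. The model with the smallest value is taken as the "best layered model" just below.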
-
-
-# Best layered model
-idx = np.argmin(misfits)
-best_layered_gm = all_sampled_gm[idx]
-
-
-# Smooth best layered model
-thick_vals = best_layered_gm[:,0]
-vp_vals = best_layered_gm[:,1]
-vs_vals = best_layered_gm[:,2]
-rho_vals = best_layered_gm[:,3]
-
-best_layered_gm[-1,0] = (depth_max - np.sum(best_layered_gm[:-1,0]))/2
-
-depth_vals = [0]
-vp_vals = [vp_vals[0]]
-vs_vals = [vs_vals[0]]
-rho_vals = [rho_vals[0]]
-
-for i, (thick, vp, vs, rho) in enumerate(zip(best_layered_gm[:,0], best_layered_gm[:,1], best_layered_gm[:,2], best_layered_gm[:,3])):
-    current_thick = best_layered_gm[i,0]
-    current_vp = best_layered_gm[i,1]
-    current_vs = best_layered_gm[i,2]
-    current_rho = best_layered_gm[i,3]
-
-    depth_vals.append(depth_vals[-1] + current_thick/2)
-    vp_vals.append(current_vp)
-    vs_vals.append(current_vs)
-    rho_vals.append(current_rho)
-
-    if i == len(best_layered_gm)-1:
-        break
-
-    next_vp = best_layered_gm[i+1,1]
-    next_vs = best_layered_gm[i+1,2]
-    next_rho = best_layered_gm[i+1,3]
-
-    depth_vals.append(depth_vals[-1] + current_thick/2)
-    vp_vals.append((current_vp + next_vp)/2)
-    vs_vals.append((current_vs + next_vs)/2)
-    rho_vals.append((current_rho + next_rho)/2)
-
-depth = depth_vals[-1] + dz
-depth = np.round(depth, 2)
-while depth < depth_max:
-    depth_vals = np.append(depth_vals, depth)
-    vp_vals = np.append(vp_vals, vp_vals[-1])
-    vs_vals = np.append(vs_vals, vs_vals[-1])
-    rho_vals = np.append(rho_vals, rho_vals[-1])
-    depth += dz
-depth_vals = np.append(depth_vals, depth)
-vp_vals = np.append(vp_vals, vp_vals[-1])
-vs_vals = np.append(vs_vals, vs_vals[-1])
-rho_vals = np.append(rho_vals, rho_vals[-1])
-
-depth_vals = np.round(depth_vals, 2)
-
-depth_vals = np.array(depth_vals)
-vp_vals = np.array(vp_vals)
-vs_vals = np.array(vs_vals)
-rho_vals = np.array(rho_vals)
-
-depth_vals_smooth = arange(min(depth_vals), max(depth_vals), dz)
-depth_vals_smooth = np.round(depth_vals_smooth, 2)
-
-f = interp1d(depth_vals, vp_vals, kind='cubic')
-vp_smooth = f(depth_vals_smooth)
-
-f = interp1d(depth_vals, vs_vals, kind='cubic')
-vs_smooth = f(depth_vals_smooth)
-
-f = interp1d(depth_vals, rho_vals, kind='cubic')
-rho_smooth = f(depth_vals_smooth)
-
-vp_smooth = vp_smooth[:-1]
-vs_smooth = vs_smooth[:-1]
-rho_smooth = rho_smooth[:-1]
-
-smooth_best_layered_gm = np.column_stack((np.full_like(vp_smooth, dz), vp_smooth, vs_smooth, rho_smooth))
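The block above turns the stepwise best model into a smooth profile: each layer contributes a point at its mid-depth, each interface a point whose value is the mean of the two adjacent layers, the deepest value is extended down to `depth_max`, and the result is resampled on a regular `dz` grid with a cubic spline. The same recipe in miniature, with hypothetical layer values:

import numpy as np
from scipy.interpolate import interp1d

thick = np.array([4.0, 6.0, 10.0])    # hypothetical thicknesses [m]
vs = np.array([200.0, 350.0, 500.0])  # hypothetical velocities [m/s]

depths, vals = [0.0], [vs[0]]
for i, (h, v) in enumerate(zip(thick, vs)):
    depths.append(depths[-1] + h / 2)  # mid-layer point keeps the layer value
    vals.append(v)
    if i < len(vs) - 1:
        depths.append(depths[-1] + h / 2)  # interface point takes the mean
        vals.append((v + vs[i + 1]) / 2)

grid = np.arange(min(depths), max(depths), 0.01)
vs_smooth = interp1d(depths, vals, kind="cubic")(grid)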
-
-
-# Median layered model
-median_layered_gm = np.median(all_sampled_gm, axis=0)
-median_layered_std = np.std(all_sampled_gm, axis=0)
-
-
-# Smooth median layered model
-thick_vals = median_layered_gm[:,0]
-vp_vals = median_layered_gm[:,1]
-vs_vals = median_layered_gm[:,2]
-rho_vals = median_layered_gm[:,3]
-std_vp_vals = median_layered_std[:,1]
-std_vs_vals = median_layered_std[:,2]
-std_rho_vals = median_layered_std[:,3]
-
-median_layered_gm[-1,0] = (depth_max - np.sum(median_layered_gm[:-1,0]))/2
-
-depth_vals = [0]
-vp_vals = [vp_vals[0]]
-vs_vals = [vs_vals[0]]
-rho_vals = [rho_vals[0]]
-std_vp_vals = [std_vp_vals[0]]
-std_vs_vals = [std_vs_vals[0]]
-std_rho_vals = [std_rho_vals[0]]
-
-for i, (thick, vp, vs, rho, std_vp, std_vs, std_rho) in enumerate(zip(median_layered_gm[:,0], median_layered_gm[:,1], median_layered_gm[:,2], median_layered_gm[:,3], median_layered_std[:,1], median_layered_std[:,2], median_layered_std[:,3])):
-    current_thick = median_layered_gm[i,0]
-    current_vp = median_layered_gm[i,1]
-    current_vs = median_layered_gm[i,2]
-    current_rho = median_layered_gm[i,3]
-    current_std_vp = median_layered_std[i,1]
-    current_std_vs = median_layered_std[i,2]
-    current_std_rho = median_layered_std[i,3]
-
-    depth_vals.append(depth_vals[-1] + current_thick/2)
-    vp_vals.append(current_vp)
-    vs_vals.append(current_vs)
-    rho_vals.append(current_rho)
-    std_vp_vals.append(current_std_vp)
-    std_vs_vals.append(current_std_vs)
-    std_rho_vals.append(current_std_rho)
-
-    if i == len(median_layered_gm)-1:
-        break
-
-    next_vp = median_layered_gm[i+1,1]
-    next_vs = median_layered_gm[i+1,2]
-    next_rho = median_layered_gm[i+1,3]
-    next_std_vp = median_layered_std[i+1,1]
-    next_std_vs = median_layered_std[i+1,2]
-    next_std_rho = median_layered_std[i+1,3]
-
-    depth_vals.append(depth_vals[-1] + current_thick/2)
-    vp_vals.append((current_vp + next_vp)/2)
-    vs_vals.append((current_vs + next_vs)/2)
-    rho_vals.append((current_rho + next_rho)/2)
-    std_vp_vals.append((current_std_vp + next_std_vp)/2)
-    std_vs_vals.append((current_std_vs + next_std_vs)/2)
-    std_rho_vals.append((current_std_rho + next_std_rho)/2)
-
-depth = depth_vals[-1] + dz
-depth = np.round(depth, 2)
-while depth < depth_max:
-    depth_vals = np.append(depth_vals, depth)
-    vp_vals = np.append(vp_vals, vp_vals[-1])
-    vs_vals = np.append(vs_vals, vs_vals[-1])
-    rho_vals = np.append(rho_vals, rho_vals[-1])
-    std_vp_vals = np.append(std_vp_vals, std_vp_vals[-1])
-    std_vs_vals = np.append(std_vs_vals, std_vs_vals[-1])
-    std_rho_vals = np.append(std_rho_vals, std_rho_vals[-1])
-    depth += dz
-depth_vals = np.append(depth_vals, depth)
-vp_vals = np.append(vp_vals, vp_vals[-1])
-vs_vals = np.append(vs_vals, vs_vals[-1])
-rho_vals = np.append(rho_vals, rho_vals[-1])
-std_vp_vals = np.append(std_vp_vals, std_vp_vals[-1])
-std_vs_vals = np.append(std_vs_vals, std_vs_vals[-1])
-std_rho_vals = np.append(std_rho_vals, std_rho_vals[-1])
-
-depth_vals = np.round(depth_vals, 2)
-
-depth_vals = np.array(depth_vals)
-vp_vals = np.array(vp_vals)
-vs_vals = np.array(vs_vals)
-rho_vals = np.array(rho_vals)
-std_vp_vals = np.array(std_vp_vals)
-std_vs_vals = np.array(std_vs_vals)
-std_rho_vals = np.array(std_rho_vals)
-
-depth_vals_smooth = arange(min(depth_vals), max(depth_vals), dz)
-depth_vals_smooth = np.round(depth_vals_smooth, 2)
-
-f = interp1d(depth_vals, vp_vals, kind='cubic')
-vp_smooth = f(depth_vals_smooth)
-
-f = interp1d(depth_vals, vs_vals, kind='cubic')
-vs_smooth = f(depth_vals_smooth)
-
-f = interp1d(depth_vals, rho_vals, kind='cubic')
-rho_smooth = f(depth_vals_smooth)
-
-f = interp1d(depth_vals, std_vp_vals, kind='cubic')
-std_vp_smooth = f(depth_vals_smooth)
-
-f = interp1d(depth_vals, std_vs_vals, kind='cubic')
-std_vs_smooth = f(depth_vals_smooth)
-
-f = interp1d(depth_vals, std_rho_vals, kind='cubic')
-std_rho_smooth = f(depth_vals_smooth)
-
-vp_smooth = vp_smooth[:-1]
-vs_smooth = vs_smooth[:-1]
-rho_smooth = rho_smooth[:-1]
-std_vp_smooth = std_vp_smooth[:-1]
-std_vs_smooth = std_vs_smooth[:-1]
-std_rho_smooth = std_rho_smooth[:-1]
-
-smooth_median_layered_gm = np.column_stack((np.full_like(vp_smooth, dz), vp_smooth, vs_smooth, rho_smooth))
-smooth_median_layered_std = np.column_stack((np.full_like(vp_smooth, dz), std_vp_smooth, std_vs_smooth, std_rho_smooth))
-
-
-
-# Median ensemble model
-all_ensemble_gm = []
-for gm in all_sampled_gm:
-    ensemble_gm = []
-    for thick, vp, vs, rho in gm:
-        ensemble_gm += [[dz, vp, vs, rho]]*int(thick/dz)
-    if len(ensemble_gm) < depth_max/dz:
-        ensemble_gm += [[dz, vp, vs, rho]]*int(depth_max/dz - len(ensemble_gm))
-    all_ensemble_gm.append(ensemble_gm)
-all_ensemble_gm = np.array(all_ensemble_gm)
-
-median_ensemble_gm = np.median(all_ensemble_gm, axis=0)
-median_ensemble_std = np.std(all_ensemble_gm, axis=0)
-
-
-median_layered_gm[-1,0] = 1
-
-
-# Save models
-print(f'ID {ID} | x_mid {x_mid} | Saving results in {output_dir}')
-
-# Save best layered model
-with open(f"{output_dir}/xmid{x_mid}_best_layered_model.gm", "w") as f:
-    f.write(f"{len(best_layered_gm)}\n")
-    np.savetxt(f, best_layered_gm, fmt="%.4f")
-# Save best layered dispersion
-for mode, fs_obs in zip(modes, fs_obs_per_mode):
-    best_layered_dc = forward_model_disba(best_layered_gm[:,0], best_layered_gm[:,2], mode, fs_obs)
-    with open(f"{output_dir}/xmid{x_mid}_best_layered_M{mode}.pvc", "w") as f:
-        np.savetxt(f, np.column_stack((fs_obs, best_layered_dc)), fmt="%.4f")
-
-# Save smooth best layered model
-with open(f"{output_dir}/xmid{x_mid}_smooth_best_layered_model.gm", "w") as f:
-    f.write(f"{len(smooth_best_layered_gm)}\n")
-    np.savetxt(f, smooth_best_layered_gm, fmt="%.4f")
-# Save smooth best layered dispersion
-for mode, fs_obs in zip(modes, fs_obs_per_mode):
-    smooth_best_layered_dc = forward_model_disba(smooth_best_layered_gm[:,0], smooth_best_layered_gm[:,2], mode, fs_obs)
-    with open(f"{output_dir}/xmid{x_mid}_smooth_best_layered_M{mode}.pvc", "w") as f:
-        np.savetxt(f, np.column_stack((fs_obs, smooth_best_layered_dc)), fmt="%.4f")
-
-# Save median layered model
-with open(f"{output_dir}/xmid{x_mid}_median_layered_model.gm", "w") as f:
-    f.write(f"{len(median_layered_gm)}\n")
-    np.savetxt(f, median_layered_gm, fmt="%.4f")
-# Save median layered std
-with open(f"{output_dir}/xmid{x_mid}_median_layered_std.gm", "w") as f:
-    f.write(f"{len(median_layered_std)}\n")
-    np.savetxt(f, median_layered_std, fmt="%.4f")
-# Save median layered dispersion
-for mode, fs_obs in zip(modes, fs_obs_per_mode):
-    median_layered_dc = forward_model_disba(median_layered_gm[:,0], median_layered_gm[:,2], mode, fs_obs)
-    with open(f"{output_dir}/xmid{x_mid}_median_layered_M{mode}.pvc", "w") as f:
-        np.savetxt(f, np.column_stack((fs_obs, median_layered_dc)), fmt="%.4f")
-
-# Save smooth median layered model
-with open(f"{output_dir}/xmid{x_mid}_smooth_median_layered_model.gm", "w") as f:
-    f.write(f"{len(smooth_median_layered_gm)}\n")
-    np.savetxt(f, smooth_median_layered_gm, fmt="%.4f")
-# Save smooth median layered std
-with open(f"{output_dir}/xmid{x_mid}_smooth_median_layered_std.gm", "w") as f:
-    f.write(f"{len(smooth_median_layered_std)}\n")
-    np.savetxt(f, smooth_median_layered_std, fmt="%.4f")
-# Save smooth median layered dispersion
-for mode, fs_obs in zip(modes, fs_obs_per_mode):
-    smooth_median_layered_dc = forward_model_disba(smooth_median_layered_gm[:,0], smooth_median_layered_gm[:,2], mode, fs_obs)
-    with open(f"{output_dir}/xmid{x_mid}_smooth_median_layered_M{mode}.pvc", "w") as f:
-        np.savetxt(f, np.column_stack((fs_obs, smooth_median_layered_dc)), fmt="%.4f")
-
-# Save median ensemble model
-with open(f"{output_dir}/xmid{x_mid}_median_ensemble_model.gm", "w") as f:
-    f.write(f"{len(median_ensemble_gm)}\n")
-    np.savetxt(f, median_ensemble_gm, fmt="%.4f")
-# Save median ensemble std
-with open(f"{output_dir}/xmid{x_mid}_median_ensemble_std.gm", "w") as f:
-    f.write(f"{len(median_ensemble_std)}\n")
-    np.savetxt(f, median_ensemble_std, fmt="%.4f")
-# Save median ensemble dispersion
-for mode, fs_obs in zip(modes, fs_obs_per_mode):
-    median_ensemble_dc = forward_model_disba(median_ensemble_gm[:,0], median_ensemble_gm[:,2], mode, fs_obs)
-    with open(f"{output_dir}/xmid{x_mid}_median_ensemble_M{mode}.pvc", "w") as f:
-        np.savetxt(f, np.column_stack((fs_obs, median_ensemble_dc)), fmt="%.4f")
-
-
-
-print(f'ID {ID} | x_mid {x_mid} | Plotting results in {output_dir}')
-
-# Update dispersion image with all obs and pred modes
-FV = np.loadtxt(f"{folder_path}/xmid{x_mid}/comp/xmid{x_mid}_dispersion.csv", delimiter=",")
-fs = np.loadtxt(f"{folder_path}/xmid{x_mid}/comp/xmid{x_mid}_fs.csv", delimiter=",")
-vs = np.loadtxt(f"{folder_path}/xmid{x_mid}/comp/xmid{x_mid}_vs.csv", delimiter=",")
-
-obs_modes = []
-for mode in modes:
-    pvc = np.loadtxt(f"{folder_path}/xmid{x_mid}/pick/xmid{x_mid}_obs_M{mode}.pvc")
-    obs_modes.append(pvc)
-
-pred_modes = []
-for mode, fs_obs in zip(modes, fs_obs_per_mode):
-    pred_modes.append(np.column_stack((fs_obs, forward_model_disba(median_layered_gm[:,0], median_layered_gm[:,2], mode, fs_obs))))
-
-full_pred_modes = []
-inRange = True
-mode = 0
-velocity_model = median_layered_gm/1000
-pd = PhaseDispersion(*velocity_model.T)
-periods = 1 / fs[fs>0]
-periods = periods[::-1]
-while inRange:
-    data = pd(periods, mode=mode, wave="rayleigh")
-    if data.period.shape[0] == 0:
-        inRange = False
-    mode+=1
-    full_pred_modes.append(np.column_stack((1/data.period[::-1], data.velocity[::-1]*1000)))
-
-name_path = f"{output_dir}/xmid{x_mid}_dispersion.svg"
-display_dispersion_img(FV, fs, vs, obs_modes=obs_modes, pred_modes=pred_modes, full_pred_modes=full_pred_modes, path=name_path, normalization='Frequency', dx=dx)
-
-
-
-# Plot inversion results
-fig, axs = plt.subplots(1, 2, figsize=(18*CM, 12*CM), dpi=300)
-
-ax = axs[0]
-
-for i, (fs_obs, vr_obs, err_obs, mode) in enumerate(zip(fs_obs_per_mode, vr_obs_per_mode, err_obs_per_mode, modes)):
-    # Plot inferred data
-    d_pred = np.array(results[f'rayleigh_M{mode}.dpred'])
-    percentiles = np.percentile(d_pred, (10, 50, 90), axis=0)
-    label = '10th-90th percentiles' if i == 0 else '_nolegend_'
-    ax.fill_between(fs_obs, percentiles[0], percentiles[2], color='k', alpha=0.2, label=label, zorder=1)
-    label = '50th percentile' if i == 0 else '_nolegend_'
-    ax.plot(fs_obs, percentiles[1], color='k', label=label, linewidth=0.2, linestyle='--', zorder=2)
-
-    # Plot observed data
-    label = 'Observed Data' if i == 0 else '_nolegend_'
-    ax.errorbar(fs_obs, vr_obs, yerr=err_obs, fmt='o', color='tab:blue', label=label, markersize=1.5, capsize=0, elinewidth=0.3, zorder=3)
-
-    # Plot best layered model
-    label = 'Best layered model' if i == 0 else '_nolegend_'
-    ax.plot(fs_obs, forward_model_disba(best_layered_gm[:,0], best_layered_gm[:,2], mode, fs_obs), 'o', color='tab:green', label=label, markersize=1.5, zorder=4)
-
-    # Plot smooth best layered model
-    label = 'Smooth best layered model' if i == 0 else '_nolegend_'
-    ax.plot(fs_obs, forward_model_disba(smooth_best_layered_gm[:,0], smooth_best_layered_gm[:,2], mode, fs_obs), 'o', color='green', label=label, markersize=1.5, zorder=6)
-
-    # Plot median layered model
-    label = 'Median layered model' if i == 0 else '_nolegend_'
-    ax.plot(fs_obs, forward_model_disba(median_layered_gm[:,0], median_layered_gm[:,2], mode, fs_obs), 'o', color='orange', label=label, markersize=1.5, zorder=4)
-
-    # Plot smooth median layered model
-    label = 'Smooth median layered model' if i == 0 else '_nolegend_'
-    ax.plot(fs_obs, forward_model_disba(smooth_median_layered_gm[:,0], smooth_median_layered_gm[:,2], mode, fs_obs), 'o', color='tab:orange', label=label, markersize=1.5, zorder=6)
-
-    # Plot median ensemble model
-    label = 'Median ensemble model' if i == 0 else '_nolegend_'
-    ax.plot(fs_obs, forward_model_disba(median_ensemble_gm[:,0], median_ensemble_gm[:,2], mode, fs_obs), 'o', color='tab:red', label=label, markersize=1.5, zorder=5)
-
-ax.set_xlabel('Frequency [Hz]')
-ax.set_ylabel('Phase velocity $v_{R}$ [m/s]')
-ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2))
-
-ax = axs[1]
-
-# Plot all models
-cmap = plt.get_cmap('Greys_r')
-def _forward(x):
-    return np.log(x)
-def _inverse(x):
-    return np.exp(x)
-norm = colors.FuncNorm((_forward, _inverse), vmin=np.min(misfits), vmax=np.max(misfits))
-sorted_idx = np.argsort(misfits)[::-1]
-all_sampled_gm = all_sampled_gm[sorted_idx]
-misfits = misfits[sorted_idx]
-for gm, misfit in zip(all_sampled_gm, misfits):
-    thick_vals = np.copy(gm[:,0])
-    vs_vals = np.copy(gm[:,2])
-    thick_vals[-1] = depth_max - np.sum(thick_vals[:-2])
-    depth_vals = np.cumsum(thick_vals)
-    depth_vals = np.insert(depth_vals, 0, 0)
-    vs_vals = np.append(vs_vals, vs_vals[-1])
-    ax.step(vs_vals, depth_vals, color=cmap(norm(misfit)), label='_nolegend_', linewidth=0.5)
-
-# Plot best layered model
-thick_vals = np.copy(best_layered_gm[:,0])
-vs_vals = np.copy(best_layered_gm[:,2])
-thick_vals[-1] = depth_max - np.sum(thick_vals[:-2])
-depth_vals = np.cumsum(thick_vals)
-depth_vals = np.insert(depth_vals, 0, 0)
-vs_vals = np.append(vs_vals, vs_vals[-1])
-ax.step(vs_vals, depth_vals, color='tab:green', label='Best layered model', linewidth=1)
-
-# Plot smooth best layered model
-thick_vals = np.copy(smooth_best_layered_gm[:,0])
-vs_vals = np.copy(smooth_best_layered_gm[:,2])
-thick_vals[-1] = depth_max - np.sum(thick_vals[:-2])
-depth_vals = np.cumsum(thick_vals)
-depth_vals = np.insert(depth_vals, 0, 0)
-vs_vals = np.append(vs_vals, vs_vals[-1])
-ax.step(vs_vals, depth_vals, color='green', label='Smooth best layered model', linewidth=1)
-
-# Plot median layered model
-thick_vals = np.copy(median_layered_gm[:,0])
-vs_vals = np.copy(median_layered_gm[:,2])
-thick_vals[-1] = depth_max - np.sum(thick_vals[:-2])
-depth_vals = np.cumsum(thick_vals)
-depth_vals = np.insert(depth_vals, 0, 0)
-vs_vals = np.append(vs_vals, vs_vals[-1])
-ax.step(vs_vals, depth_vals, color='orange', label='Median layered model', linewidth=1)
-
-# Plot smooth median layered model
-thick_vals = np.copy(smooth_median_layered_gm[:,0])
-vs_vals = np.copy(smooth_median_layered_gm[:,2])
-thick_vals[-1] = depth_max - np.sum(thick_vals[:-2])
-depth_vals = np.cumsum(thick_vals)
-depth_vals = np.insert(depth_vals, 0, 0)
-vs_vals = np.append(vs_vals, vs_vals[-1])
-ax.step(vs_vals, depth_vals, color='tab:orange', label='Smooth median layered model', linewidth=1)
-# Plot std smooth model
-std_vs_vals = np.copy(smooth_median_layered_std[:,2])
-std_vs_vals = np.append(std_vs_vals, std_vs_vals[-1])
-ax.step(vs_vals-std_vs_vals, depth_vals, color='tab:orange', label='Standard deviation', linewidth=1, linestyle='dotted')
-ax.step(vs_vals+std_vs_vals, depth_vals, color='tab:orange', label='_nolegend_', linewidth=1, linestyle='dotted')
-
-# Plot median ensemble model
-thick_vals = np.copy(median_ensemble_gm[:,0])
-vs_vals = np.copy(median_ensemble_gm[:,2])
-thick_vals[-1] = depth_max - np.sum(thick_vals[:-2])
-depth_vals = np.cumsum(thick_vals)
-depth_vals = np.insert(depth_vals, 0, 0)
-vs_vals = np.append(vs_vals, vs_vals[-1])
-ax.step(vs_vals, depth_vals, color='tab:red', label='Median ensemble model', linewidth=1)
-
-ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2))
-ax.invert_yaxis()
-ax.set_ylim(depth_max, 0)
-ax.set_xlabel('Shear wave velocity $v_{S}$ [m/s]')
-ax.set_ylabel('Depth [m]')
-
-plt.tight_layout()
-fig.savefig(f"{output_dir}/xmid{x_mid}_density_curves.png")
-
-
-
-print(f'ID {ID} | x_mid {x_mid} | Plotting marginals in {output_dir}')
-
-results = inversion.get_results()
-samples = {f'$v_{{s{i+1}}}$ [m/s]': np.array(results[f'space.vs{i+1}']).T for i in range(nb_layers)}
-samples.update({f'$H_{{{i+1}}}$ [m]': np.array(results[f'space.thick{i+1}']).T for i in range(nb_layers-1)})
-
-rows = nb_layers*2-1
-cols = rows
-width = 2.6 * CM * cols
-height = 2.4 * CM * rows
-az.rcParams["plot.max_subplots"] = rows*cols
-fig, axs = plt.subplots(rows, cols, figsize=(width, height), dpi=300)
-_ = az.plot_pair(
-    samples,
-    marginals=True,
-    kind='kde',
-    kde_kwargs={
-        'hdi_probs': [0.3, 0.6, 0.9], # Plot 30%, 60% and 90% HDI contours
-        'contourf_kwargs': {'cmap': 'Blues'},
-    },
-    ax=axs,
-    textsize=FONT_SIZE,
-    colorbar=True,
-)
-
-for i in range(rows):
-    axs[i,i].set_ylabel('Probability')
-    axs[i,i].yaxis.set_label_position("right")
-    axs[i,i].yaxis.tick_right()
-    if i==0:
-        legend = 'a priori'
-    else:
-        legend = '_nolegend_'
-plt.savefig(f"{output_dir}/xmid{x_mid}_marginals.png")
-
-toc = time()
-
-print(f'ID {ID} | x_mid {x_mid} | Inversion completed in {toc-tic:.1f} s')
-sys.stdout = sys.__stdout__
-print(f'\033[92mID {ID} | x_mid {x_mid} | Inversion completed in {toc-tic:.1f} s\033[0m')
-sys.stdout = log_file