geone 1.3.0__py313-none-manylinux_2_35_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
geone/srf.py ADDED
@@ -0,0 +1,3661 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+
4
+ # -------------------------------------------------------------------------
5
+ # Python module: 'srf.py'
6
+ # author: Julien Straubhaar
7
+ # date: sep-2024
8
+ # -------------------------------------------------------------------------
9
+
10
+ """
11
+ Module for the generation of random fields based on substitution random function (SRF).
12
+ Random fields in 1D, 2D, 3D.
13
+
14
+ References
15
+ ----------
16
+ - J. Straubhaar, P. Renard (2024), \
17
+ Exploring substitution random functions composed of stationary multi-Gaussian processes. \
18
+ Stochastic Environmental Research and Risk Assessment, \
19
+ `doi:10.1007/s00477-024-02662-x <https://doi.org/10.1007/s00477-024-02662-x>`_
20
+ - C. Lantuéjoul (2002) Geostatistical Simulation, Models and Algorithms. \
21
+ Springer Verlag, Berlin, 256 p.
22
+ """
23
+
24
+ import numpy as np
25
+ import scipy.stats as stats
26
+ from scipy.interpolate import interp1d
27
+ from geone import covModel as gcm
28
+ from geone import markovChain as mc
29
+ from geone import multiGaussian
30
+
31
+ # ============================================================================
32
+ class SrfError(Exception):
33
+ """
34
+ Custom exception related to `srf` module.
35
+ """
36
+ pass
37
+ # ============================================================================
38
+
39
+ # ============================================================================
40
+ # Tools for simulating categorical SRF with
41
+ # - multi-Gaussian simulation as directing function (latent field)
42
+ # - Markov chain as coding process
43
+ # ============================================================================
44
+
45
+ # ----------------------------------------------------------------------------
46
+ def srf_mg_mc(
47
+ cov_model_T, kernel_Y,
48
+ dimension, spacing=None, origin=None,
49
+ spacing_Y=0.001,
50
+ categVal=None,
51
+ x=None, v=None,
52
+ t=None, yt=None,
53
+ algo_T='fft', params_T=None,
54
+ mh_iter=100, ntry_max=1,
55
+ nreal=1,
56
+ full_output=True,
57
+ verbose=1,
58
+ logger=None):
59
+ """
60
+ Substitution Random Function (SRF) - multi-Gaussian + Markov chain (on finite set).
61
+
62
+ This function allows generating categorical random fields in 1D, 2D or 3D, based on
63
+ an SRF Z defined as
64
+
65
+ - Z(x) = Y(T(x))
66
+
67
+ where
68
+
69
+ - T is the directing function, a multi-Gaussian random field (latent field)
70
+ - Y is the coding process, a Markov chain on a finite set of categories (1D)
71
+
72
+ Z and T are fields in 1D, 2D or 3D.
73
+
74
+ Notes
75
+ -----
76
+ The module :mod:`multiGaussian` is used for the multi-Gaussian field T, and the
77
+ module :mod:`markovChain` is used for the Markov chain Y.
78
+
79
+ Parameters
80
+ ----------
81
+ cov_model_T : :class:`geone.covModel.CovModel<d>D`
82
+ covariance model for T, in 1D or 2D or 3D
83
+
84
+ kernel_Y : 2d-array of shape (n, n)
85
+ transition kernel for Y of a Markov chain on a set of states
86
+ :math:`S=\\{0, \\ldots, n-1\\}`, where `n` is the number of categories
87
+ (states); the element at row `i` and column `j` is the probability of having
88
+ the state of index `j` at the next step given the state `i` at the current
89
+ step, i.e.
90
+
91
+ - :math:`kernel[i][j] = P(Y_{k+1}=j\\ \\vert\\ Y_{k}=i)`
92
+
93
+ where the sequence of random variables :math:`(Y_k)` is a Markov chain
94
+ on `S` defined by the kernel `kernel`.
95
+
96
+ In particular, every element of `kernel` is positive or zero, and its
97
+ rows sum to one (see the example near the end of this docstring).
98
+
99
+ dimension : [sequence of] int(s)
100
+ number of cells along each axis, for simulation in:
101
+
102
+ - 1D: `dimension=nx`
103
+ - 2D: `dimension=(nx, ny)`
104
+ - 3D: `dimension=(nx, ny, nz)`
105
+
106
+ spacing : [sequence of] float(s), optional
107
+ cell size along each axis, for simulation in:
108
+
109
+ - 1D: `spacing=sx`
110
+ - 2D: `spacing=(sx, sy)`
111
+ - 3D: `spacing=(sx, sy, sz)`
112
+
113
+ by default (`None`): 1.0 along each axis
114
+
115
+ origin : [sequence of] float(s), optional
116
+ origin of the grid ("corner of the first cell"), for simulation in:
117
+
118
+ - 1D: `origin=ox`
119
+ - 2D: `origin=(ox, oy)`
120
+ - 3D: `origin=(ox, oy, oz)`
121
+
122
+ by default (`None`): 0.0 along each axis
123
+
124
+ spacing_Y : float, default: 0.001
125
+ positive value, resolution of the Y process, spacing along abscissa
126
+ between two steps in the Markov chain Y (i.e. between two adjacent cells in
127
+ the 1D grid for Y)
128
+
129
+ categVal : 1d-array of shape (n,), optional
130
+ values of categories (one value for each state `0, ..., n-1`);
131
+ by default (`None`) : `categVal` is set to `[0, ..., n-1]`
132
+
133
+ x : array-like of floats, optional
134
+ data points locations (float coordinates), for simulation in:
135
+
136
+ - 1D: 1D array-like of floats
137
+ - 2D: 2D array-like of floats of shape (m, 2)
138
+ - 3D: 2D array-like of floats of shape (m, 3)
139
+
140
+ note: if one point (m=1), a float in 1D, a 1D array of shape (2,) in 2D,
141
+ a 1D array of shape (3,) in 3D, is accepted
142
+
143
+ v : 1d-array-like of floats, optional
144
+ data values at `x` (`v[i]` is the data value at `x[i]`)
145
+
146
+ t : 1d-array-like of floats, or float, optional
147
+ values of T considered as conditioning points for Y(T) (additional constraint)
148
+
149
+ yt : 1d-array-like of floats, or float, optional
150
+ values of Y at the conditioning points `t` (same length as `t`)
151
+
152
+ algo_T : str
153
+ defines the algorithm used for generating multi-Gaussian field T:
154
+
155
+ - 'fft' or 'FFT' (default): based on circulant embedding and FFT, \
156
+ function called for <d>D (d = 1, 2, or 3): `geone.grf.grf<d>D`
157
+ - 'classic' or 'CLASSIC': classic algorithm, based on the resolution \
158
+ of a kriging system using the points in a search ellipsoid, function \
159
+ called for <d>D (d = 1, 2, or 3): `geone.geoscalassicinterface.simulate<d>D`
160
+
161
+ params_T : dict, optional
162
+ keyword arguments (additional parameters) to be passed to the function
163
+ corresponding to what is specified by the argument `algo_T` (see the
164
+ corresponding function for its keyword arguments), in particular the key
165
+ 'mean' can be specified (set to value 0 if not specified)
166
+
167
+ mh_iter : int, default: 100
168
+ number of iteration for Metropolis-Hasting algorithm, for conditional
169
+ simulation only; note: used only if `x` or `t` is not `None`
170
+
171
+ ntry_max : int, default: 1
172
+ number of tries per realization before giving up if something goes wrong
173
+
174
+ nreal : int, default: 1
175
+ number of realization(s)
176
+
177
+ full_output : bool, default: True
178
+ - if `True`: simulation(s) of Z, T, and Y are retrieved in output
179
+ - if `False`: simulation(s) of Z only is retrieved in output
180
+
181
+ verbose : int, default: 1
182
+ verbose mode, integer >=0, higher implies more display
183
+
184
+ logger : :class:`logging.Logger`, optional
185
+ logger (see package `logging`)
186
+ if specified, messages are written via `logger` (no print)
187
+
188
+ Returns
189
+ -------
190
+ Z : nd-array
191
+ all realizations, `Z[k]` is the `k`-th realization:
192
+
193
+ - for 1D: `Z` of shape (nreal, nx), where nx = dimension
194
+ - for 2D: `Z` of shape (nreal, ny, nx), where nx, ny = dimension
195
+ - for 3D: `Z` of shape (nreal, nz, ny, nx), where nx, ny, nz = dimension
196
+
197
+ T : nd-array
198
+ latent fields of all realizations, `T[k]` for the `k`-th realization:
199
+
200
+ - for 1D: `T` of shape (nreal, nx), where nx = dimension
201
+ - for 2D: `T` of shape (nreal, ny, nx), where nx, ny = dimension
202
+ - for 3D: `T` of shape (nreal, nz, ny, nx), where nx, ny, nz = dimension
203
+
204
+ returned if `full_output=True`
205
+
206
+ Y : list of length nreal
207
+ Markov chains of all realizations, `Y[k]` is a list of length 4 for
208
+ the `k`-th realization:
209
+
210
+ - Y[k][0]: int, Y_nt (number of cells along the t-axis)
211
+ - Y[k][1]: float, Y_st (cell size along t-axis)
212
+ - Y[k][2]: float, Y_ot (origin)
213
+ - Y[k][3]: 1d-array of shape (Y_nt,), values of Y[k]
214
+
215
+ returned if `full_output=True`
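A minimal illustration of a valid `kernel_Y` (an editorial sketch, not part of the package docstring): a row-stochastic transition kernel for `n=3` states, with nonnegative entries and rows summing to one.

>>> import numpy as np
>>> kernel_Y = np.array([[0.8, 0.1, 0.1],
...                      [0.2, 0.7, 0.1],
...                      [0.1, 0.2, 0.7]])
>>> np.allclose(kernel_Y.sum(axis=1), 1.0)  # each row sums to one
True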
216
+ """
217
+ fname = 'srf_mg_mc'
218
+
219
+ if algo_T not in ('fft', 'FFT', 'classic', 'CLASSIC'):
220
+ err_msg = f"{fname}: `algo_T` invalid, should be 'fft' (default) or 'classic'"
221
+ if logger: logger.error(err_msg)
222
+ raise SrfError(err_msg)
223
+
224
+ # Set space dimension (of grid) according to covariance model for T
225
+ if isinstance(cov_model_T, gcm.CovModel1D):
226
+ d = 1
227
+ elif isinstance(cov_model_T, gcm.CovModel2D):
228
+ d = 2
229
+ elif isinstance(cov_model_T, gcm.CovModel3D):
230
+ d = 3
231
+ else:
232
+ err_msg = f'{fname}: `cov_model_T` invalid, should be an instance of `geone.covModel.CovModel1D`, `geone.covModel.CovModel2D` or `geone.covModel.CovModel3D`'
233
+ if logger: logger.error(err_msg)
234
+ raise SrfError(err_msg)
235
+
236
+ # Check argument 'dimension'
237
+ if hasattr(dimension, '__len__') and len(dimension) != d:
238
+ err_msg = f'{fname}: `dimension` of incompatible length'
239
+ if logger: logger.error(err_msg)
240
+ raise SrfError(err_msg)
241
+
242
+ if d == 1:
243
+ grid_size = dimension
244
+ else:
245
+ grid_size = np.prod(dimension)
246
+
247
+ # Check (or set) argument 'spacing'
248
+ if spacing is None:
249
+ if d == 1:
250
+ spacing = 1.0
251
+ else:
252
+ spacing = tuple(np.ones(d))
253
+ else:
254
+ if hasattr(spacing, '__len__') and len(spacing) != d:
255
+ err_msg = f'{fname}: `spacing` of incompatible length'
256
+ if logger: logger.error(err_msg)
257
+ raise SrfError(err_msg)
258
+
259
+ # Check (or set) argument 'origin'
260
+ if origin is None:
261
+ if d == 1:
262
+ origin = 0.0
263
+ else:
264
+ origin = tuple(np.zeros(d))
265
+ else:
266
+ if hasattr(origin, '__len__') and len(origin) != d:
267
+ err_msg = f'{fname}: `origin` of incompatible length'
268
+ if logger: logger.error(err_msg)
269
+ raise SrfError(err_msg)
270
+
271
+ # if not cov_model_T.is_stationary(): # prevent calculation if covariance model is not stationary
272
+ # if verbose > 0:
273
+ # print(f'ERROR ({fname}): `cov_model_T` is not stationary')
274
+
275
+ # Check kernel for Y
276
+ if not isinstance(kernel_Y, np.ndarray) or kernel_Y.ndim != 2 or kernel_Y.shape[0] != kernel_Y.shape[1]:
277
+ err_msg = f'{fname}: `kernel_Y` is not a square matrix (2d array)'
278
+ if logger: logger.error(err_msg)
279
+ raise SrfError(err_msg)
280
+
281
+ if np.any(kernel_Y < 0) or not np.all(np.isclose(kernel_Y.sum(axis=1), 1.0)):
282
+ err_msg = f'{fname}: `kernel_Y` is not a transition probability matrix'
283
+ if logger: logger.error(err_msg)
284
+ raise SrfError(err_msg)
285
+
286
+ # Number of categories (order of the kernel)
287
+ n = kernel_Y.shape[0]
288
+
289
+ # Check category values
290
+ if categVal is None:
291
+ categVal = np.arange(n)
292
+ else:
293
+ categVal = np.asarray(categVal)
294
+ if categVal.ndim != 1 or categVal.shape[0] != n:
295
+ err_msg = f'{fname}: `categVal` invalid'
296
+ if logger: logger.error(err_msg)
297
+ raise SrfError(err_msg)
298
+
299
+ if len(np.unique(categVal)) != len(categVal):
300
+ err_msg = f'{fname}: `categVal` contains duplicated values'
301
+ if logger: logger.error(err_msg)
302
+ raise SrfError(err_msg)
303
+
304
+ # Check additional constraint t (conditioning point for T), yt (corresponding value for Y)
305
+ if t is None:
306
+ if yt is not None:
307
+ err_msg = f'{fname}: `t` is not given (`None`) but `yt` is given (not `None`)'
308
+ if logger: logger.error(err_msg)
309
+ raise SrfError(err_msg)
310
+
311
+ else:
312
+ if yt is None:
313
+ err_msg = f'{fname}: `t` is given (not `None`) but `yt` is not given (`None`)'
314
+ if logger: logger.error(err_msg)
315
+ raise SrfError(err_msg)
316
+
317
+ t = np.asarray(t, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
318
+ yt = np.asarray(yt, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
319
+ if len(yt) != len(t):
320
+ err_msg = f'{fname}: length of `yt` is not valid'
321
+ if logger: logger.error(err_msg)
322
+ raise SrfError(err_msg)
323
+
324
+ # Check values
325
+ if not np.all([yv in categVal for yv in yt]):
326
+ err_msg = f'{fname}: `yt` contains an invalid value'
327
+ if logger: logger.error(err_msg)
328
+ raise SrfError(err_msg)
329
+
330
+ # Initialize dictionary params_T
331
+ if params_T is None:
332
+ params_T = {}
333
+
334
+ # Compute meshgrid over simulation domain if needed (see below)
335
+ if ('mean' in params_T.keys() and callable(params_T['mean'])) or ('var' in params_T.keys() and callable(params_T['var'])):
336
+ if d == 1:
337
+ xi = origin + spacing*(0.5+np.arange(dimension)) # x-coordinate of cell center
338
+ elif d == 2:
339
+ xi = origin[0] + spacing[0]*(0.5+np.arange(dimension[0])) # x-coordinate of cell center
340
+ yi = origin[1] + spacing[1]*(0.5+np.arange(dimension[1])) # y-coordinate of cell center
341
+ yyi, xxi = np.meshgrid(yi, xi, indexing='ij')
342
+ elif d == 3:
343
+ xi = origin[0] + spacing[0]*(0.5+np.arange(dimension[0])) # x-coordinate of cell center
344
+ yi = origin[1] + spacing[1]*(0.5+np.arange(dimension[1])) # y-coordinate of cell center
345
+ zi = origin[2] + spacing[2]*(0.5+np.arange(dimension[2])) # z-coordinate of cell center
346
+ zzi, yyi, xxi = np.meshgrid(zi, yi, xi, indexing='ij')
347
+
348
+ # Set mean_T (as array) from params_T
349
+ if 'mean' not in params_T.keys():
350
+ mean_T = np.array([0.0])
351
+ else:
352
+ mean_T = params_T['mean']
353
+ if mean_T is None:
354
+ mean_T = np.array([0.0])
355
+ elif callable(mean_T):
356
+ if d == 1:
357
+ mean_T = mean_T(xi).reshape(-1) # replace function 'mean_T' by its evaluation on the grid
358
+ elif d == 2:
359
+ mean_T = mean_T(xxi, yyi).reshape(-1) # replace function 'mean_T' by its evaluation on the grid
360
+ elif d == 3:
361
+ mean_T = mean_T(xxi, yyi, zzi).reshape(-1) # replace function 'mean_T' by its evaluation on the grid
362
+ else:
363
+ mean_T = np.asarray(mean_T).reshape(-1)
364
+ if mean_T.size not in (1, grid_size):
365
+ err_msg = f"{fname}: 'mean' parameter for T (in `params_T`) has incompatible size"
366
+ if logger: logger.error(err_msg)
367
+ raise SrfError(err_msg)
368
+
369
+ # Set var_T (as array) from params_T, if given
370
+ var_T = None
371
+ if 'var' in params_T.keys():
372
+ var_T = params_T['var']
373
+ if var_T is not None:
374
+ if callable(var_T):
375
+ if d == 1:
376
+ var_T = var_T(xi).reshape(-1) # replace function 'var_T' by its evaluation on the grid
377
+ elif d == 2:
378
+ var_T = var_T(xxi, yyi).reshape(-1) # replace function 'var_T' by its evaluation on the grid
379
+ elif d == 3:
380
+ var_T = var_T(xxi, yyi, zzi).reshape(-1) # replace function 'var_T' by its evaluation on the grid
381
+ else:
382
+ var_T = np.asarray(var_T).reshape(-1)
383
+ if var_T.size not in (1, grid_size):
384
+ err_msg = f"{fname}: 'var' parameter for T (in `params_T`) has incompatible size"
385
+ if logger: logger.error(err_msg)
386
+ raise SrfError(err_msg)
387
+
388
+ # Number of realization(s)
389
+ nreal = int(nreal) # cast to int if needed
390
+
391
+ if nreal <= 0:
392
+ if full_output:
393
+ if verbose > 0:
394
+ if logger:
395
+ logger.warning(f'{fname}: `nreal` <= 0: `None`, `None`, `None` is returned')
396
+ else:
397
+ print(f'{fname}: WARNING: `nreal` <= 0: `None`, `None`, `None` is returned')
398
+ return None, None, None
399
+ else:
400
+ if verbose > 0:
401
+ if logger:
402
+ logger.warning(f'{fname}: `nreal` <= 0: `None` is returned')
403
+ else:
404
+ print(f'{fname}: WARNING: `nreal` <= 0: `None` is returned')
405
+ return None
406
+
407
+ # Note: format of data (x, v) not checked !
408
+
409
+ if x is None:
410
+ # Preparation for unconditional case
411
+ if v is not None:
412
+ err_msg = f'{fname}: `x` is not given (`None`) but `v` is given (not `None`)'
413
+ if logger: logger.error(err_msg)
414
+ raise SrfError(err_msg)
415
+
416
+ else:
417
+ # Preparation for conditional case
418
+ if v is None:
419
+ err_msg = f'{fname}: `x` is given (not `None`) but `v` is not given (`None`)'
420
+ if logger: logger.error(err_msg)
421
+ raise SrfError(err_msg)
422
+
423
+ x = np.asarray(x, dtype='float').reshape(-1, d) # cast in d-dimensional array if needed
424
+ v = np.asarray(v, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
425
+ if len(v) != x.shape[0]:
426
+ err_msg = f'{fname}: length of `v` is not valid'
427
+ if logger: logger.error(err_msg)
428
+ raise SrfError(err_msg)
429
+
430
+ # Check values
431
+ if not np.all([yv in categVal for yv in v]):
432
+ err_msg = f'{fname}: `v` contains an invalid value'
433
+ if logger: logger.error(err_msg)
434
+ raise SrfError(err_msg)
435
+
436
+ # Number of conditioning points
437
+ npt = x.shape[0]
438
+
439
+ # Get index in mean_T for each conditioning point
440
+ x_mean_T_grid_ind = None
441
+ if mean_T.size == 1:
442
+ x_mean_T_grid_ind = np.zeros(npt, dtype='int')
443
+ else:
444
+ indc_f = (x-origin)/spacing
445
+ indc = indc_f.astype(int)
446
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
447
+ if d == 1:
448
+ x_mean_T_grid_ind = 1 * indc[:, 0] # multiplying by 1 makes a copy of the array !
449
+ elif d == 2:
450
+ x_mean_T_grid_ind = indc[:, 0] + dimension[0] * indc[:, 1]
451
+ elif d == 3:
452
+ x_mean_T_grid_ind = indc[:, 0] + dimension[0] * (indc[:, 1] + dimension[1] * indc[:, 2])
453
+ #
454
+ # Get index in var_T (if not None) for each conditioning point
455
+ if var_T is not None:
456
+ if var_T.size == 1:
457
+ x_var_T_grid_ind = np.zeros(npt, dtype='int')
458
+ else:
459
+ if x_mean_T_grid_ind is not None:
460
+ x_var_T_grid_ind = x_mean_T_grid_ind
461
+ else:
462
+ indc_f = (x-origin)/spacing
463
+ indc = indc_f.astype(int)
464
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
465
+ if d == 1:
466
+ x_var_T_grid_ind = 1 * indc[:, 0] # multiplying by 1 makes a copy of the array !
467
+ elif d == 2:
468
+ x_var_T_grid_ind = indc[:, 0] + dimension[0] * indc[:, 1]
469
+ elif d == 3:
470
+ x_var_T_grid_ind = indc[:, 0] + dimension[0] * (indc[:, 1] + dimension[1] * indc[:, 2])
471
+
472
+ # Get covariance function for T
473
+ cov_func_T = cov_model_T.func() # covariance function
474
+
475
+ # Get evaluation of covariance function for T at 0
476
+ cov0_T = cov_func_T(np.zeros(d))
477
+
478
+ # Set kriging matrix for T (mat_T) of order npt, "over every conditioning point"
479
+ mat_T = np.ones((npt, npt))
480
+ for i in range(npt-1):
481
+ # lag between x[i] and x[j], j=i+1, ..., npt-1
482
+ h = x[(i+1):] - x[i]
483
+ cov_h_T = cov_func_T(h)
484
+ mat_T[i, (i+1):npt] = cov_h_T
485
+ mat_T[(i+1):npt, i] = cov_h_T
486
+ mat_T[i, i] = cov0_T
487
+
488
+ mat_T[-1,-1] = cov0_T
489
+
490
+ if var_T is not None:
491
+ varUpdate = np.sqrt(var_T[x_var_T_grid_ind]/cov0_T)
492
+ mat_T = varUpdate*(mat_T.T*varUpdate).T
493
+
494
+ # Initialize
495
+ # - npt_ext: total number of conditioning points for Y, "points T(x) + additional constraint t"
496
+ # - v_T: values of T(x) (that are defined later) followed by the values t of the additional constraint
497
+ # - v_ext: values for Y at "point T(x) + additional constraint (t)"
498
+ if t is None:
499
+ npt_ext = npt
500
+ v_T = np.zeros(npt)
501
+ v_ext = v
502
+ else:
503
+ npt_ext = npt + len(t)
504
+ v_T = np.hstack((np.zeros(npt), t))
505
+ v_ext = np.hstack((v, yt))
506
+
507
+ # Set index in categVal of values v_ext
508
+ v_ext_cat = np.array([np.where(categVal==yv)[0][0] for yv in v_ext], dtype='int')
509
+
510
+ if npt_ext <= 1:
511
+ mh_iter = 0 # unnecessary to apply Metropolis update !
512
+
513
+ # Preparation of
514
+ # - pinv : invariant distribution
515
+ # - kernel_Y_rev (reverse transition kernel)
516
+ # - kernel_Y_pow (kernel raised to power 0, 1, 2, ...)
517
+ # - kernel_Y_rev_pow (reverse kernel raised to power 0, 1, 2, ...)
518
+ try:
519
+ pinv_Y = mc.compute_mc_pinv(kernel_Y, logger=logger)
520
+ except Exception as exc:
521
+ err_msg = f'{fname}: computing invariant distribution for Y failed'
522
+ if logger: logger.error(err_msg)
523
+ raise SrfError(err_msg) from exc
524
+
525
+ try:
526
+ kernel_Y_rev = mc.compute_mc_kernel_rev(kernel_Y, pinv=pinv_Y, logger=logger)
527
+ except Exception as exc:
528
+ err_msg = f'{fname}: kernel for Y not reversible'
529
+ if logger: logger.error(err_msg)
530
+ raise SrfError(err_msg) from exc
531
+
532
+ m_pow = 1
533
+ kernel_Y_pow = np.zeros((m_pow, n, n))
534
+ kernel_Y_pow[0] = np.eye(n)
535
+
536
+ m_rev_pow = 1
537
+ kernel_Y_rev_pow = np.zeros((m_rev_pow, n, n))
538
+ kernel_Y_rev_pow[0] = np.eye(n)
539
+
540
+ if t is not None and len(t) > 1:
541
+ # Check validity of additional constraint (t, yt):
542
+ # check the compatibility with kernel_Y, i.e. that the probabilities:
543
+ # Prob(Y[t[k1]]=yt[k1], Y[t[k2]]=yt[k2]) > 0, for all pairs t[k1] < t[k2]
544
+ #
545
+ # Compute
546
+ # yind: node index of conditioning node (nearest node),
547
+ # rounded to lower index if between two grid nodes and index is positive
548
+ yind_f = (t-t.min())/spacing_Y
549
+ yind = yind_f.astype(int)
550
+ yind = yind - 1 * np.all((yind == yind_f, yind > 0), axis=0)
551
+ #
552
+ # Set index in categVal of values yt
553
+ yval_cat = np.array([np.where(categVal==yv)[0][0] for yv in yt], dtype='int')
554
+ #
555
+ inds = np.argsort(yind)
556
+ i0 = max(np.diff([yind[j] for j in inds]))
557
+ if i0 >= m_pow:
558
+ kernel_Y_pow = np.concatenate((kernel_Y_pow, np.zeros((i0-m_pow+1, n, n))), axis=0)
559
+ for i in range(m_pow, i0+1):
560
+ kernel_Y_pow[i] = kernel_Y_pow[i-1].dot(kernel_Y)
561
+ m_pow = i0+1
562
+ # check if Prob(Y[t[inds[i+1]]]=yt[inds[i+1]], Y[t[inds[i]]]=yt[inds[i]]) = kernel^(inds[i+1]-inds[i])[yval_cat[inds[i]], yval_cat[inds[i+1]]] > 0, for all i
563
+ if np.any(np.isclose([kernel_Y_pow[yind[inds[i+1]]-yind[inds[i]], int(yval_cat[inds[i]]), int(yval_cat[inds[i+1]])] for i in range(len(t)-1)], 0)):
564
+ # if np.any([kernel_Y_pow[yind[inds[i+1]]-yind[inds[i]], int(yval_cat[inds[i]]), int(yval_cat[inds[i+1]])] < 1.e-20 for i in range(len(t)-1)]):
565
+ err_msg = f'{fname}: invalid additional constraint on Markov chain Y wrt. kernel'
566
+ if logger: logger.error(err_msg)
567
+ raise SrfError(err_msg)
568
+
569
+ # Set (again if given) default parameter 'mean' and 'var' for T
570
+ params_T['mean'] = mean_T
571
+ params_T['var'] = var_T
572
+
573
+ # Set default parameter 'verbose' for params_T
574
+ if 'verbose' not in params_T.keys():
575
+ params_T['verbose'] = 0
576
+ # params_T['verbose'] = verbose
577
+
578
+ # Initialization for output
579
+ Z = []
580
+ if full_output:
581
+ T = []
582
+ Y = []
583
+
584
+ for ireal in range(nreal):
585
+ # Generate ireal-th realization
586
+ if verbose > 1:
587
+ if logger:
588
+ logger.info(f'{fname}: simulation {ireal+1} of {nreal}...')
589
+ else:
590
+ print(f'{fname}: simulation {ireal+1} of {nreal}...')
591
+ for ntry in range(ntry_max):
592
+ sim_ok = True
593
+ if verbose > 2 and ntry > 0:
594
+ if logger:
595
+ logger.info(f'{fname}: ... new trial ({ntry+1} of {ntry_max}) for simulation {ireal+1} of {nreal}...')
596
+ else:
597
+ print(f'{fname}: ... new trial ({ntry+1} of {ntry_max}) for simulation {ireal+1} of {nreal}...')
598
+ if x is None:
599
+ # Unconditional case
600
+ # ------------------
601
+ # Generate T (one real)
602
+ try:
603
+ sim_T = multiGaussian.multiGaussianRun(
604
+ cov_model_T, dimension, spacing, origin,
605
+ mode='simulation', algo=algo_T, output_mode='array',
606
+ **params_T, nreal=1, logger=logger)
607
+ except:
608
+ sim_ok = False
609
+ if verbose > 2:
610
+ if logger:
611
+ logger.info(f'{fname}: ... simulation of T failed')
612
+ else:
613
+ print(f'{fname}: ... simulation of T failed')
614
+ continue
615
+ # except Exception as exc:
616
+ # err_msg = f'{fname}: simulation of T failed'
617
+ # if logger: logger.error(err_msg)
618
+ # raise SrfError(err_msg) from exc
619
+
620
+ # -> nd-array of shape
621
+ # (1, dimension) (for T in 1D)
622
+ # (1, dimension[1], dimension[0]) (for T in 2D)
623
+ # (1, dimension[2], dimension[1], dimension[0]) (for T in 3D)
624
+
625
+ # Set origin and dimension for Y
626
+ min_T = np.min(sim_T)
627
+ max_T = np.max(sim_T)
628
+ if t is not None:
629
+ min_T = min(t.min(), min_T)
630
+ max_T = max(t.max(), max_T)
631
+ min_T = min_T - 0.5 * spacing_Y
632
+ max_T = max_T + 0.5 * spacing_Y
633
+ dimension_Y = int(np.ceil((max_T - min_T)/spacing_Y))
634
+ origin_Y = min_T - 0.5*(dimension_Y*spacing_Y - (max_T - min_T))
635
+
636
+ if t is not None:
637
+ # Compute
638
+ # yind: node index of conditioning node (nearest node),
639
+ # rounded to lower index if between two grid nodes and index is positive
640
+ yind_f = (t-origin_Y)/spacing_Y
641
+ yind = yind_f.astype(int)
642
+ yind = yind - 1 * np.all((yind == yind_f, yind > 0), axis=0)
643
+ #
644
+ yval = yt
645
+ else:
646
+ yind, yval = None, None
647
+
648
+ # Generate Y conditional to possible additional constraint (t, yt) (one real)
649
+ try:
650
+ mc_Y = mc.simulate_mc(
651
+ kernel_Y, dimension_Y,
652
+ categVal=categVal, data_ind=yind, data_val=yval,
653
+ pinv=pinv_Y, kernel_rev=kernel_Y_rev, kernel_pow=kernel_Y_pow,
654
+ nreal=1,
655
+ logger=logger)
656
+ except:
657
+ sim_ok = False
658
+ if verbose > 2:
659
+ if logger:
660
+ logger.info(f'{fname}: ... simulation of Markov chain Y failed')
661
+ else:
662
+ print(f'{fname}: ... simulation of Markov chain Y failed')
663
+ continue
664
+ # except Exception as exc:
665
+ # err_msg = f'{fname}: simulation of Markov chain Y failed'
666
+ # if logger: logger.error(err_msg)
667
+ # raise SrfError(err_msg) from exc
668
+
669
+ # -> 2d-array of shape (1, dimension_Y)
670
+
671
+ else:
672
+ # Conditional case
673
+ # ----------------
674
+ # Initialize: unconditional simulation of T at x (values in v_T)
675
+ ind = np.random.permutation(npt)
676
+ for j, k in enumerate(ind):
677
+ # Simulate value at x[k] (= x[ind[j]]), conditionally to the previous ones
678
+ # Solve the kriging system (for T)
679
+ try:
680
+ w = np.linalg.solve(
681
+ mat_T[ind[:j], :][:, ind[:j]], # kriging matrix
682
+ mat_T[ind[:j], ind[j]], # second member
683
+ )
684
+ except:
685
+ sim_ok = False
686
+ break
687
+
688
+ # Mean (kriged) value at x[k]
689
+ mu_T_k = mean_T[x_mean_T_grid_ind[k]] + (v_T[ind[:j]] - mean_T[x_mean_T_grid_ind[ind[:j]]]).dot(w)
690
+ # Standard deviation (of kriging) at x[k]
691
+ std_T_k = np.sqrt(np.maximum(0, cov0_T - np.dot(w, mat_T[ind[:j], ind[j]])))
692
+ # Draw value in N(mu_T_k, std_T_k^2)
693
+ v_T[k] = np.random.normal(loc=mu_T_k, scale=std_T_k)
694
+
695
+ if not sim_ok:
696
+ sim_ok = False
697
+ if verbose > 2:
698
+ if logger:
699
+ logger.info(f'{fname}: ... cannot solve kriging system (for T, initialization)')
700
+ else:
701
+ print(f'{fname}: ... cannot solve kriging system (for T, initialization)')
702
+ continue
703
+
704
+ # Update simulated values v_T at x using Metropolis-Hastings (MH) algorithm
705
+ for nit in range(mh_iter):
706
+ if verbose > 3:
707
+ if logger:
708
+ logger.info(f'{fname}: ... sim {ireal+1} of {nreal}: MH iter {nit+1} of {mh_iter}...')
709
+ else:
710
+ print(f'{fname}: ... sim {ireal+1} of {nreal}: MH iter {nit+1} of {mh_iter}...')
711
+ ind = np.random.permutation(npt)
712
+ for k in ind:
713
+ # Sequence of indexes without k
714
+ indmat = np.hstack((np.arange(k), np.arange(k+1, npt)))
715
+ # Simulate possible new value v_T_k_new at x[k], conditionally to all the other ones
716
+ #
717
+ # Solve the kriging system for T
718
+ try:
719
+ w = np.linalg.solve(
720
+ mat_T[indmat, :][:, indmat], # kriging matrix
721
+ mat_T[indmat, k], # second member
722
+ )
723
+ except:
724
+ sim_ok = False
725
+ if verbose > 2:
726
+ if logger:
727
+ logger.info(f'{fname}: ... cannot solve kriging system (for T)')
728
+ else:
729
+ print(f'{fname}: ... cannot solve kriging system (for T)')
730
+ break
731
+ #
732
+ # Mean (kriged) value at x[k]
733
+ mu_T_k = mean_T[x_mean_T_grid_ind[k]] + (v_T[indmat] - mean_T[x_mean_T_grid_ind[indmat]]).dot(w)
734
+ # Standard deviation (of kriging) at x[k]
735
+ std_T_k = np.sqrt(np.maximum(0, cov0_T - np.dot(w, mat_T[indmat, k])))
736
+ # Draw value in N(mu, std^2)
737
+ v_T_k_new = np.random.normal(loc=mu_T_k, scale=std_T_k)
738
+ #
739
+ # Compute MH quotient defined as
740
+ # p_new / p
741
+ # where:
742
+ # p_new = prob(Y[v_T_k_new] = v[k] | Y[indmat] = v[indmat], Y[t] = yt)
743
+ # p = prob(Y[v_T[k]] = v[k] | Y[indmat] = v[indmat], Y[t] = yt)
744
+ inds = np.argsort(v_T)
745
+ # --- Compute p ---
746
+ v_T_k_i = np.where(inds==k)[0][0]
747
+ # v_T[k] = v_T[inds[v_T_k_i]]
748
+ if v_T_k_i == 0:
749
+ # v_T[k] is the smallest value in v_T
750
+ # we have
751
+ # v_T[k] <= v_T[inds[v_T_k_i+1]]
752
+ # p = prob(Y[v_T[k]] = v_ext[k] | Y[v_T[inds[v_T_k_i+1]]] = v_ext[inds[v_T_k_i+1]])
753
+ i1 = int((v_T[inds[v_T_k_i+1]] - v_T[k]) / spacing_Y)
754
+ # p = kernel_Y_rev^i1[v_ext_cat[inds[v_T_k_i+1]], v_ext_cat[k]]
755
+ if i1 >= m_rev_pow:
756
+ kernel_Y_rev_pow = np.concatenate((kernel_Y_rev_pow, np.zeros((i1-m_rev_pow+1, n, n))), axis=0)
757
+ for i in range(m_rev_pow, i1+1):
758
+ kernel_Y_rev_pow[i] = kernel_Y_rev_pow[i-1].dot(kernel_Y_rev)
759
+ m_rev_pow = i1+1
760
+ p = kernel_Y_rev_pow[i1, v_ext_cat[inds[v_T_k_i+1]], v_ext_cat[k]]
761
+ elif v_T_k_i == npt_ext-1:
762
+ # v_T[k] is the largest value in v_T
763
+ # we have
764
+ # v_T[inds[v_T_k_i-1]] <= v_T[k]
765
+ # p = prob(Y[v_T[k]] = v_ext[k] | Y[v_T[inds[v_T_k_i-1]]] = v_ext[inds[v_T_k_i-1]])
766
+ i0 = int((v_T[k] - v_T[inds[v_T_k_i-1]]) / spacing_Y)
767
+ # p = kernel_Y^i0[v_ext_cat[inds[v_T_k_i-1]], v_ext_cat[k]]
768
+ if i0 >= m_pow:
769
+ kernel_Y_pow = np.concatenate((kernel_Y_pow, np.zeros((i0-m_pow+1, n, n))), axis=0)
770
+ for i in range(m_pow, i0+1):
771
+ kernel_Y_pow[i] = kernel_Y_pow[i-1].dot(kernel_Y)
772
+ m_pow = i0+1
773
+ p = kernel_Y_pow[i0, v_ext_cat[inds[v_T_k_i-1]], v_ext_cat[k]]
774
+ else:
775
+ # v_T[k] is neither the smallest nor the largest value in v_T
776
+ # we have
777
+ # v_T[inds[v_T_k_i-1]] <= v_T[k] <= v_T[inds[v_T_k_i+1]]
778
+ # p = prob(Y[v_T[k]] = v_ext[k] | Y[v_T[inds[v_T_k_i-1]]] = v_ext[inds[v_T_k_i-1]], Y[v_T[inds[v_T_k_i+1]]] = v_ext[inds[v_T_k_i+1]])
779
+ i0 = int((v_T[k] - v_T[inds[v_T_k_i-1]]) / spacing_Y)
780
+ i1 = int((v_T[inds[v_T_k_i+1]] - v_T[k]) / spacing_Y)
781
+ # p = kernel_Y^i0[v_ext_cat[inds[v_T_k_i-1]], v_ext_cat[k]] * kernel_Y^i1[v_ext_cat[k], v_ext_cat[inds[v_T_k_i+1]]] / kernel_Y^(i0+i1)[v_ext_cat[inds[v_T_k_i-1]], v_ext_cat[inds[v_T_k_i+1]]]
782
+ ii = i0+i1
783
+ if ii >= m_pow:
784
+ kernel_Y_pow = np.concatenate((kernel_Y_pow, np.zeros((ii-m_pow+1, n, n))), axis=0)
785
+ for i in range(m_pow, ii+1):
786
+ kernel_Y_pow[i] = kernel_Y_pow[i-1].dot(kernel_Y)
787
+ m_pow = ii+1
788
+ denom = kernel_Y_pow[ii, v_ext_cat[inds[v_T_k_i-1]], v_ext_cat[inds[v_T_k_i+1]]]
789
+ if np.isclose(denom, 0):
790
+ # p = 0.
791
+ # Accept new value v_T_k_new at x[k]
792
+ v_T[k] = v_T_k_new
793
+ continue
794
+ else:
795
+ p = kernel_Y_pow[i0, v_ext_cat[inds[v_T_k_i-1]], v_ext_cat[k]] * kernel_Y_pow[i1, v_ext_cat[k], v_ext_cat[inds[v_T_k_i+1]]] / denom
796
+ # --- Compute p_new ---
797
+ v_T_k_new_i = npt_ext
798
+ for i in range(npt_ext):
799
+ if v_T[inds[i]]>=v_T_k_new:
800
+ v_T_k_new_i = i
801
+ break
802
+ if v_T_k_new_i == 0:
803
+ # v_T_k_new <= v_T[i] for all i
804
+ # we have
805
+ # v_T_k_new <= v_T[inds[0]]
806
+ # p_new = prob(Y[v_T_k_new] = v_ext[k] | Y[v_T[inds[0]]] = v_ext[inds[0]])
807
+ i1 = int((v_T[inds[0]] - v_T_k_new) / spacing_Y)
808
+ # p_new = kernel_Y_rev^i1[v_ext_cat[inds[0]], v_ext_cat[k]]
809
+ if i1 >= m_rev_pow:
810
+ kernel_Y_rev_pow = np.concatenate((kernel_Y_rev_pow, np.zeros((i1-m_rev_pow+1, n, n))), axis=0)
811
+ for i in range(m_rev_pow, i1+1):
812
+ kernel_Y_rev_pow[i] = kernel_Y_rev_pow[i-1].dot(kernel_Y_rev)
813
+ m_rev_pow = i1+1
814
+ p_new = kernel_Y_rev_pow[i1, v_ext_cat[inds[0]], v_ext_cat[k]]
815
+ elif v_T_k_new_i == npt_ext:
816
+ # v_T_k_new > v_T[i] for all i
817
+ # we have
818
+ # v_T[inds[npt_ext-1]] <= v_T_k_new
819
+ # p_new = prob(Y[v_T_k_new] = v_ext[k] | Y[v_T[inds[npt_ext-1]]] = v_ext[inds[npt_ext-1]])
820
+ i0 = int((v_T_k_new - v_T[inds[npt_ext-1]]) / spacing_Y)
821
+ # p_new = kernel_Y^i0[v_ext_cat[inds[npt_ext-1]], v_ext_cat[k]]
822
+ if i0 >= m_pow:
823
+ kernel_Y_pow = np.concatenate((kernel_Y_pow, np.zeros((i0-m_pow+1, n, n))), axis=0)
824
+ for i in range(m_pow, i0+1):
825
+ kernel_Y_pow[i] = kernel_Y_pow[i-1].dot(kernel_Y)
826
+ m_pow = i0+1
827
+ p_new = kernel_Y_pow[i0, v_ext_cat[inds[npt_ext-1]], v_ext_cat[k]]
828
+ else:
829
+ # we have
830
+ # v_T[inds[v_T_k_new_i-1]] < v_T_k_new <= v_T[inds[v_T_k_new_i]]
831
+ # p_new = prob(Y[v_T_k_new] = v_ext[k] | Y[v_T[inds[v_T_k_new_i-1]]] = v_ext[inds[v_T_k_new_i-1]], Y[v_T[inds[v_T_k_new_i]]] = v_ext[inds[v_T_k_new_i]])
832
+ i0 = int((v_T_k_new - v_T[inds[v_T_k_new_i-1]]) / spacing_Y)
833
+ i1 = int((v_T[inds[v_T_k_new_i]] - v_T_k_new) / spacing_Y)
834
+ # p_new = kernel_Y^i0[v_ext_cat[inds[v_T_k_new_i-1]], v_ext_cat[k]] * kernel_Y^i1[v_ext_cat[k], v_ext_cat[inds[v_T_k_new_i]]] / kernel_Y^(i0+i1)[v_ext_cat[inds[v_T_k_new_i-1]], v_ext_cat[inds[v_T_k_new_i]]]
835
+ ii = i0+i1
836
+ if ii >= m_pow:
837
+ kernel_Y_pow = np.concatenate((kernel_Y_pow, np.zeros((ii-m_pow+1, n, n))), axis=0)
838
+ for i in range(m_pow, ii+1):
839
+ kernel_Y_pow[i] = kernel_Y_pow[i-1].dot(kernel_Y)
840
+ m_pow = ii+1
841
+ denom = kernel_Y_pow[ii, v_ext_cat[inds[v_T_k_new_i-1]], v_ext_cat[inds[v_T_k_new_i]]]
842
+ if np.isclose(denom, 0):
843
+ p_new = 0.
844
+ else:
845
+ p_new = kernel_Y_pow[i0, v_ext_cat[inds[v_T_k_new_i-1]], v_ext_cat[k]] * kernel_Y_pow[i1, v_ext_cat[k], v_ext_cat[inds[v_T_k_new_i]]] / denom
846
+ #
847
+ mh_quotient = p_new/p
848
+ if mh_quotient >= 1.0 or np.random.random() < mh_quotient:
849
+ # Accept new value v_T_k_new at x[k]
850
+ v_T[k] = v_T_k_new
851
+ #
852
+ if not sim_ok:
853
+ break
854
+ #
855
+ if not sim_ok:
856
+ continue
857
+
858
+ # Generate T conditional to (x, v_T[0:npt]) (one real)
859
+ try:
860
+ sim_T = multiGaussian.multiGaussianRun(
861
+ cov_model_T, dimension, spacing, origin, x=x, v=v_T[:npt],
862
+ mode='simulation', algo=algo_T, output_mode='array',
863
+ **params_T, nreal=1, logger=logger)
864
+ except:
865
+ sim_ok = False
866
+ if verbose > 2:
867
+ if logger:
868
+ logger.info(f'{fname}: ... conditional simulation of T failed')
869
+ else:
870
+ print(f'{fname}: ... conditional simulation of T failed')
871
+ continue
872
+ # except Exception as exc:
873
+ # err_msg = f'{fname}: conditional simulation of T failed'
874
+ # if logger: logger.error(err_msg)
875
+ # raise SrfError(err_msg) from exc
876
+
877
+ # -> nd-array of shape
878
+ # (1, dimension) (for T in 1D)
879
+ # (1, dimension[1], dimension[0]) (for T in 2D)
880
+ # (1, dimension[2], dimension[1], dimension[0]) (for T in 3D)
881
+
882
+ # Set origin and dimension for Y
883
+ min_T = np.min(sim_T)
884
+ max_T = np.max(sim_T)
885
+ if t is not None:
886
+ min_T = min(t.min(), min_T)
887
+ max_T = max(t.max(), max_T)
888
+ min_T = min_T - 0.5 * spacing_Y
889
+ max_T = max_T + 0.5 * spacing_Y
890
+ dimension_Y = int(np.ceil((max_T - min_T)/spacing_Y))
891
+ origin_Y = min_T - 0.5*(dimension_Y*spacing_Y - (max_T - min_T))
892
+
893
+ # Compute
894
+ # yind: node index (nearest node),
895
+ # rounded to lower index if between two grid nodes and index is positive
896
+ yind_f = (v_T-origin_Y)/spacing_Y
897
+ yind = yind_f.astype(int)
898
+ yind = yind - 1 * np.all((yind == yind_f, yind > 0), axis=0)
899
+
900
+ # Generate Y conditional to (v_T, v_ext) (one real)
901
+ try:
902
+ mc_Y = mc.simulate_mc(
903
+ kernel_Y, dimension_Y,
904
+ categVal=categVal, data_ind=yind, data_val=v_ext,
905
+ pinv=pinv_Y, kernel_rev=kernel_Y_rev, kernel_pow=kernel_Y_pow,
906
+ nreal=1,
907
+ logger=logger)
908
+ except:
909
+ sim_ok = False
910
+ if verbose > 2:
911
+ if logger:
912
+ logger.info(f'{fname}: ... conditional simulation of Markov chain Y failed')
913
+ else:
914
+ print(f'{fname}: ... conditional simulation of Markov chain Y failed')
915
+ continue
916
+ # except Exception as exc:
917
+ # err_msg = f'{fname}: conditional simulation of Markov chain Y failed'
918
+ # if logger: logger.error(err_msg)
919
+ # raise SrfError(err_msg) from exc
920
+
921
+ # -> 2d-array of shape (1, dimension_Y)
922
+
923
+ # Generate Z (one real)
924
+ # Compute
925
+ # ind: node index (nearest node),
926
+ # rounded to lower index if between two grid nodes and index is positive
927
+ ind_f = (sim_T.reshape(-1) - origin_Y)/spacing_Y
928
+ ind = ind_f.astype(int)
929
+ ind = ind - 1 * np.all((ind == ind_f, ind > 0), axis=0)
930
+ Z_real = mc_Y[0][ind]
931
+ # Z_real = mc_Y[0][np.floor((sim_T.reshape(-1) - origin_Y)/spacing_Y).astype(int)]
932
+ if sim_ok:
933
+ Z.append(Z_real)
934
+ if full_output:
935
+ T.append(sim_T[0])
936
+ Y.append([dimension_Y, spacing_Y, origin_Y, mc_Y.reshape(dimension_Y)])
937
+ break
938
+
939
+ # Get Z
940
+ if verbose > 0 and len(Z) < nreal:
941
+ if logger:
942
+ logger.warning(f'{fname}: some realizations failed (missing)')
943
+ else:
944
+ print(f'{fname}: WARNING: some realizations failed (missing)')
945
+
946
+ Z = np.asarray(Z).reshape(len(Z), *np.atleast_1d(dimension)[::-1])
947
+
948
+ if full_output:
949
+ T = np.asarray(T).reshape(len(T), *np.atleast_1d(dimension)[::-1])
950
+ return Z, T, Y
951
+ else:
952
+ return Z
953
+ # ----------------------------------------------------------------------------
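To make the intended use of `srf_mg_mc` concrete, here is a minimal, unverified sketch of an unconditional 2D call (an editorial addition, not part of the package source). The `gcm.CovModel2D(elem=...)` construction and the parameter values are assumptions to be checked against the `covModel` module; the call itself follows the signature documented above.

import numpy as np
from geone import covModel as gcm
from geone import srf

# Covariance model for the directing function T (2D); assumed CovModel2D convention
cov_model_T = gcm.CovModel2D(elem=[('spherical', {'w': 1.0, 'r': [50.0, 30.0]})])

# Row-stochastic transition kernel for the coding Markov chain Y (3 categories)
kernel_Y = np.array([[0.8, 0.1, 0.1],
                     [0.2, 0.7, 0.1],
                     [0.1, 0.2, 0.7]])

# Two unconditional realizations on a 100 x 80 grid
Z, T, Y = srf.srf_mg_mc(
    cov_model_T, kernel_Y,
    dimension=(100, 80), spacing=(1.0, 1.0), origin=(0.0, 0.0),
    spacing_Y=0.001,
    nreal=2,
    full_output=True)

print(Z.shape)  # expected (2, 80, 100), i.e. (nreal, ny, nx), if both realizations succeed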
954
+
955
+ # ============================================================================
956
+ # Tools for simulating continuous SRF with
957
+ # - multi-Gaussian simulation as directing function (latent field)
958
+ # - multi-Gaussian simulation as coding process
959
+ # ============================================================================
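As above, a minimal, unverified sketch for the continuous case (an editorial addition, not part of the package source), calling `srf_mg_mg`, which is defined just below, with 1D covariance models; the `gcm.CovModel1D(elem=...)` construction is an assumed convention.

from geone import covModel as gcm
from geone import srf

# Covariance models for the directing function T and the coding process Y (both 1D here)
cov_model_T = gcm.CovModel1D(elem=[('gaussian', {'w': 1.0, 'r': 20.0})])
cov_model_Y = gcm.CovModel1D(elem=[('exponential', {'w': 1.0, 'r': 0.5})])

# One unconditional realization of a continuous SRF on a 1D grid of 200 cells
Z, T, Y = srf.srf_mg_mg(
    cov_model_T, cov_model_Y,
    dimension=200, spacing=1.0, origin=0.0,
    spacing_Y=0.001,
    nreal=1,
    full_output=True)

print(Z.shape)  # expected (1, 200), i.e. (nreal, nx)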
960
+
961
+ # ----------------------------------------------------------------------------
962
+ def srf_mg_mg(
963
+ cov_model_T, cov_model_Y,
964
+ dimension, spacing=None, origin=None,
965
+ spacing_Y=0.001,
966
+ x=None, v=None,
967
+ t=None, yt=None,
968
+ vmin=None, vmax=None,
969
+ algo_T='fft', params_T=None,
970
+ algo_Y='fft', params_Y=None,
971
+ target_distrib=None,
972
+ initial_distrib=None,
973
+ mh_iter=100,
974
+ ntry_max=1,
975
+ nreal=1,
976
+ full_output=True,
977
+ verbose=1,
978
+ logger=None):
979
+ """
980
+ Substitution Random Function (SRF) - multi-Gaussian + multi-Gaussian.
981
+
982
+ This function allows generating continuous random fields in 1D, 2D or 3D, based on
983
+ an SRF Z defined as
984
+
985
+ - Z(x) = Y(T(x))
986
+
987
+ where
988
+
989
+ - T is the directing function, a multi-Gaussian random field (latent field)
990
+ - Y is the coding process, a multi-Gaussian random process (1D)
991
+
992
+ Z and T are fields in 1D, 2D or 3D.
993
+
994
+ Notes
995
+ -----
996
+ The module :mod:`multiGaussian` is used for the multi-Gaussian fields T and Y.
997
+
998
+ Parameters
999
+ ----------
1000
+ cov_model_T : :class:`geone.covModel.CovModel<d>D`
1001
+ covariance model for T, in 1D or 2D or 3D
1002
+
1003
+ cov_model_Y : :class:`geone.covModel.CovModel1D`
1004
+ covariance model for Y, in 1D
1005
+
1006
+ dimension : [sequence of] int(s)
1007
+ number of cells along each axis, for simulation in:
1008
+
1009
+ - 1D: `dimension=nx`
1010
+ - 2D: `dimension=(nx, ny)`
1011
+ - 3D: `dimension=(nx, ny, nz)`
1012
+
1013
+ spacing : [sequence of] float(s), optional
1014
+ cell size along each axis, for simulation in:
1015
+
1016
+ - 1D: `spacing=sx`
1017
+ - 2D: `spacing=(sx, sy)`
1018
+ - 3D: `spacing=(sx, sy, sz)`
1019
+
1020
+ by default (`None`): 1.0 along each axis
1021
+
1022
+ origin : [sequence of] float(s), optional
1023
+ origin of the grid ("corner of the first cell"), for simulation in:
1024
+
1025
+ - 1D: `origin=ox`
1026
+ - 2D: `origin=(ox, oy)`
1027
+ - 3D: `origin=(ox, oy, oz)`
1028
+
1029
+ by default (`None`): 0.0 along each axis
1030
+
1031
+ spacing_Y : float, default: 0.001
1032
+ positive value, resolution of the Y process, spacing along abscissa
1033
+ between two cells in the field Y (i.e. between two adjacent cells in the 1D grid
1034
+ for Y)
1035
+
1036
+ x : array-like of floats, optional
1037
+ data points locations (float coordinates), for simulation in:
1038
+
1039
+ - 1D: 1D array-like of floats
1040
+ - 2D: 2D array-like of floats of shape (m, 2)
1041
+ - 3D: 2D array-like of floats of shape (m, 3)
1042
+
1043
+ note: if one point (m=1), a float in 1D, a 1D array of shape (2,) in 2D,
1044
+ a 1D array of shape (3,) in 3D, is accepted
1045
+
1046
+ v : 1d-array-like of floats, optional
1047
+ data values at `x` (`v[i]` is the data value at `x[i]`)
1048
+
1049
+ t : 1d-array-like of floats, or float, optional
1050
+ values of T considered as conditioning point for Y(T) (additional constraint)
1051
+
1052
+ yt : 1d-array-like of floats, or float, optional
1053
+ value of Y at the conditioning point `t` (same length as `t`)
1054
+
1055
+ vmin : float, optional
1056
+ minimal value for Z (or Y); simulations are rejected if not honoured
1057
+
1058
+ vmax : float, optional
1059
+ maximal value for Z (or Y); simulations are rejected if not honoured
1060
+
1061
+ algo_T : str
1062
+ defines the algorithm used for generating multi-Gaussian field T:
1063
+
1064
+ - 'fft' or 'FFT' (default): based on circulant embedding and FFT, \
1065
+ function called for <d>D (d = 1, 2, or 3): `geone.grf.grf<d>D`
1066
+ - 'classic' or 'CLASSIC': classic algorithm, based on the resolution \
1067
+ of a kriging system using the points in a search ellipsoid, function \
1068
+ called for <d>D (d = 1, 2, or 3): `geone.geoscalassicinterface.simulate<d>D`
1069
+
1070
+ params_T : dict, optional
1071
+ keyword arguments (additional parameters) to be passed to the function
1072
+ corresponding to what is specified by the argument `algo_T` (see the
1073
+ corresponding function for its keyword arguments), in particular the key
1074
+ 'mean' can be specified (set to value 0 if not specified)
1075
+
1076
+ algo_Y : str
1077
+ defines the algorithm used for generating 1D multi-Gaussian field Y:
1078
+
1079
+ - 'fft' or 'FFT' (default): based on circulant embedding and FFT, \
1080
+ function called: :func:`geone.grf.grf1D`
1081
+ - 'classic' or 'CLASSIC': classic algorithm, based on the resolution \
1082
+ of a kriging system using the points in a search ellipsoid, function \
1083
+ called: :func:`geone.geoscalassicinterface.simulate`
1084
+
1085
+ params_Y : dict, optional
1086
+ keyword arguments (additional parameters) to be passed to the function
1087
+ corresponding to what is specified by the argument `algo_Y` (see the
1088
+ corresponding function for its keyword arguments), in particular the key
1089
+ 'mean' can be specified (if not specified, set to the mean value of `v`
1090
+ if `v` is not `None`, set to 0 otherwise)
1091
+
1092
+ target_distrib : class
1093
+ target distribution for the value of a single realization of Z, with
1094
+ attributes:
1095
+
1096
+ - target_distrib.cdf : (`func`) cdf
1097
+ - target_distrib.ppf : (`func`) inverse cdf
1098
+
1099
+ See `initial_distrib` below.
1100
+
1101
+ initial_distrib : class
1102
+ initial distribution for the value of a single realization of Z, with
1103
+ attributes:
1104
+
1105
+ - initial_distrib.cdf : (`func`) cdf
1106
+ - initial_distrib.ppf : (`func`) inverse cdf
1107
+
1108
+ The procedure is the following:
1109
+
1110
+ 1. conditioning data values `v` (if present) are transformed:
1111
+ * `v_tilde = initial_distrib.ppf(target_distrib.cdf(v))`
1112
+ 2. SRF realization of `z_tilde` (conditionally to `v_tilde` if present) \
1113
+ is generated
1114
+ 3. back-transform is applied to obtain the final realization (see the sketch near the end of this docstring):
1115
+ * `z = target_distrib.ppf(initial_distrib.cdf(z_tilde))`
1116
+
1117
+ By default:
1118
+
1119
+ - `target_distrib = None`
1120
+ - `initial_distrib = None`
1121
+
1122
+ * For unconditional case:
1123
+ - if `target_distrib` is `None`: no transformation is applied
1124
+ - otherwise (not `None`): transformation is applied (step 3. above)
1125
+ * For conditional case:
1126
+ - if `target_distrib` is `None`: no transformation is applied
1127
+ - otherwise (not `None`): transformation is applied (steps 1 and 3. \
1128
+ above); this requires that `initial_distrib` is specified (not `None`), \
1129
+ or that `t` and `yt` are specified with the value "mean_T" given in `t`
1130
+
1131
+ The distribution `initial_distrib` is used when needed:
1132
+
1133
+ * as specified (if not `None`, be sure of what is given)
1134
+ * computed automatically otherwise (if `None`): \
1135
+ the distribution returned by the function \
1136
+ :func:`compute_distrib_Z_given_Y_of_mean_T`, with the keyword arguments \
1137
+ (only for unconditional case)
1138
+ - mean_Y : set to `mean_Y` (see above)
1139
+ - cov_T_0 : set to the covariance of T evaluated at 0 (`cov_model_T.func()(0)[0]`)
1140
+ - y_mean_T: set to `yt[i0]`, where `t[i0]=mean_T` (if exists, see `t`, `yt` above) \
1141
+ or set to Y(mean(T)) computed after step 2 above (otherwise)
1142
+
1143
+ mh_iter : int, default: 100
1144
+ number of iterations of the Metropolis-Hastings algorithm, for conditional
1145
+ simulation only; note: used only if `x` or `t` is not `None`
1146
+
1147
+ ntry_max : int, default: 1
1148
+ number of tries per realization before giving up if something goes wrong
1149
+
1150
+ nreal : int, default: 1
1151
+ number of realization(s)
1152
+
1153
+ full_output : bool, default: True
1154
+ - if `True`: simulation(s) of Z, T, and Y are retrieved in output
1155
+ - if `False`: simulation(s) of Z only is retrieved in output
1156
+
1157
+ verbose : int, default: 1
1158
+ verbose mode, integer >=0, higher implies more display
1159
+
1160
+ logger : :class:`logging.Logger`, optional
1161
+ logger (see package `logging`)
1162
+ if specified, messages are written via `logger` (no print)
1163
+
1164
+ Returns
1165
+ -------
1166
+ Z : nd-array
1167
+ all realizations, `Z[k]` is the `k`-th realization:
1168
+
1169
+ - for 1D: `Z` of shape (nreal, nx), where nx = dimension
1170
+ - for 2D: `Z` of shape (nreal, ny, nx), where nx, ny = dimension
1171
+ - for 3D: `Z` of shape (nreal, nz, ny, nx), where nx, ny, nz = dimension
1172
+
1173
+ T : nd-array
1174
+ latent fields of all realizations, `T[k]` for the `k`-th realization:
1175
+
1176
+ - for 1D: `T` of shape (nreal, nx), where nx = dimension
1177
+ - for 2D: `T` of shape (nreal, ny, nx), where nx, ny = dimension
1178
+ - for 3D: `T` of shape (nreal, nz, ny, nx), where nx, ny, nz = dimension
1179
+
1180
+ returned if `full_output=True`
1181
+
1182
+ Y : list of length nreal
1183
+ 1D random fields of all realizations, `Y[k]` is a list of length 4 for
1184
+ the `k`-th realization:
1185
+
1186
+ - Y[k][0]: int, Y_nt (number of cells along the t-axis)
1187
+ - Y[k][1]: float, Y_st (cell size along t-axis)
1188
+ - Y[k][2]: float, Y_ot (origin)
1189
+ - Y[k][3]: 1d-array of shape (Y_nt,), values of Y[k]
1190
+
1191
+ returned if `full_output=True`
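A minimal sketch of the transform of steps 1 and 3 above (an editorial addition, not part of the package docstring), assuming frozen `scipy.stats` distributions are used for `target_distrib` and `initial_distrib`:

>>> import scipy.stats as stats
>>> target_distrib = stats.lognorm(s=0.5)             # desired distribution of Z
>>> initial_distrib = stats.norm(loc=0.0, scale=1.0)  # distribution of the raw SRF values
>>> v_tilde = initial_distrib.ppf(target_distrib.cdf(2.0))  # step 1: transform a data value v = 2.0
>>> z = target_distrib.ppf(initial_distrib.cdf(0.3))        # step 3: back-transform a raw value z_tilde = 0.3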
1192
+ """
1193
+ fname = 'srf_mg_mg'
1194
+
1195
+ if algo_T not in ('fft', 'FFT', 'classic', 'CLASSIC'):
1196
+ err_msg = f"{fname}: `algo_T` invalid, should be 'fft' (default) or 'classic'"
1197
+ if logger: logger.error(err_msg)
1198
+ raise SrfError(err_msg)
1199
+
1200
+ if algo_Y not in ('fft', 'FFT', 'classic', 'CLASSIC'):
1201
+ err_msg = f"{fname}: `algo_Y` invalid, should be 'fft' (default) or 'classic'"
1202
+ if logger: logger.error(err_msg)
1203
+ raise SrfError(err_msg)
1204
+
1205
+ # Set space dimension (of grid) according to covariance model for T
1206
+ if isinstance(cov_model_T, gcm.CovModel1D):
1207
+ d = 1
1208
+ elif isinstance(cov_model_T, gcm.CovModel2D):
1209
+ d = 2
1210
+ elif isinstance(cov_model_T, gcm.CovModel3D):
1211
+ d = 3
1212
+ else:
1213
+ err_msg = f'{fname}: `cov_model_T` invalid, should be an instance of `geone.covModel.CovModel1D`, `geone.covModel.CovModel2D` or `geone.covModel.CovModel3D`'
1214
+ if logger: logger.error(err_msg)
1215
+ raise SrfError(err_msg)
1216
+
1217
+ # Check argument 'dimension'
1218
+ if hasattr(dimension, '__len__') and len(dimension) != d:
1219
+ err_msg = f'{fname}: `dimension` of incompatible length'
1220
+ if logger: logger.error(err_msg)
1221
+ raise SrfError(err_msg)
1222
+
1223
+ if d == 1:
1224
+ grid_size = dimension
1225
+ else:
1226
+ grid_size = np.prod(dimension)
1227
+
1228
+ # Check (or set) argument 'spacing'
1229
+ if spacing is None:
1230
+ if d == 1:
1231
+ spacing = 1.0
1232
+ else:
1233
+ spacing = tuple(np.ones(d))
1234
+ else:
1235
+ if hasattr(spacing, '__len__') and len(spacing) != d:
1236
+ err_msg = f'{fname}: `spacing` of incompatible length'
1237
+ if logger: logger.error(err_msg)
1238
+ raise SrfError(err_msg)
1239
+
1240
+ # Check (or set) argument 'origin'
1241
+ if origin is None:
1242
+ if d == 1:
1243
+ origin = 0.0
1244
+ else:
1245
+ origin = tuple(np.zeros(d))
1246
+ else:
1247
+ if hasattr(origin, '__len__') and len(origin) != d:
1248
+ err_msg = f'{fname}: `origin` of incompatible length'
1249
+ if logger: logger.error(err_msg)
1250
+ raise SrfError(err_msg)
1251
+
1252
+ # if not cov_model_T.is_stationary(): # prevent calculation if covariance model is not stationary
1253
+ # if verbose > 0:
1254
+ # print(f'ERROR ({fname}): `cov_model_T` is not stationary')
1255
+
1256
+ # Check covariance model for Y
1257
+ if not isinstance(cov_model_Y, gcm.CovModel1D):
1258
+ err_msg = f'{fname}: `cov_model_Y` invalid'
1259
+ if logger: logger.error(err_msg)
1260
+ raise SrfError(err_msg)
1261
+
1262
+ # elif not cov_model_Y.is_stationary(): # prevent calculation if covariance model is not stationary
1263
+ # err_msg = f'{fname}: `cov_model_Y` is not stationary'
1264
+ # if logger: logger.error(err_msg)
1265
+ # raise SrfError(err_msg)
1266
+
1267
+ # Check additional constraint t (conditioning point for T), yt (corresponding value for Y)
1268
+ if t is None:
1269
+ if yt is not None:
1270
+ err_msg = f'{fname}: `t` is not given (`None`) but `yt` is given (not `None`)'
1271
+ if logger: logger.error(err_msg)
1272
+ raise SrfError(err_msg)
1273
+
1274
+ else:
1275
+ if yt is None:
1276
+ err_msg = f'{fname}: `t` is given (not `None`) but `yt` is not given (`None`)'
1277
+ if logger: logger.error(err_msg)
1278
+ raise SrfError(err_msg)
1279
+
1280
+ t = np.asarray(t, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
1281
+ yt = np.asarray(yt, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
1282
+ if len(yt) != len(t):
1283
+ err_msg = f'{fname}: length of `yt` is not valid'
1284
+ if logger: logger.error(err_msg)
1285
+ raise SrfError(err_msg)
1286
+
1287
+ # Initialize dictionary params_T
1288
+ if params_T is None:
1289
+ params_T = {}
1290
+
1291
+ # Compute meshgrid over simulation domain if needed (see below)
1292
+ if ('mean' in params_T.keys() and callable(params_T['mean'])) or ('var' in params_T.keys() and callable(params_T['var'])):
1293
+ if d == 1:
1294
+ xi = origin + spacing*(0.5+np.arange(dimension)) # x-coordinate of cell center
1295
+ elif d == 2:
1296
+ xi = origin[0] + spacing[0]*(0.5+np.arange(dimension[0])) # x-coordinate of cell center
1297
+ yi = origin[1] + spacing[1]*(0.5+np.arange(dimension[1])) # y-coordinate of cell center
1298
+ yyi, xxi = np.meshgrid(yi, xi, indexing='ij')
1299
+ elif d == 3:
1300
+ xi = origin[0] + spacing[0]*(0.5+np.arange(dimension[0])) # x-coordinate of cell center
1301
+ yi = origin[1] + spacing[1]*(0.5+np.arange(dimension[1])) # y-coordinate of cell center
1302
+ zi = origin[2] + spacing[2]*(0.5+np.arange(dimension[2])) # z-coordinate of cell center
1303
+ zzi, yyi, xxi = np.meshgrid(zi, yi, xi, indexing='ij')
1304
+
1305
+ # Set mean_T (as array) from params_T
1306
+ if 'mean' not in params_T.keys():
1307
+ mean_T = np.array([0.0])
1308
+ else:
1309
+ mean_T = params_T['mean']
1310
+ if mean_T is None:
1311
+ mean_T = np.array([0.0])
1312
+ elif callable(mean_T):
1313
+ if d == 1:
1314
+ mean_T = mean_T(xi).reshape(-1) # replace function 'mean_T' by its evaluation on the grid
1315
+ elif d == 2:
1316
+ mean_T = mean_T(xxi, yyi).reshape(-1) # replace function 'mean_T' by its evaluation on the grid
1317
+ elif d == 3:
1318
+ mean_T = mean_T(xxi, yyi, zzi).reshape(-1) # replace function 'mean_T' by its evaluation on the grid
1319
+ else:
1320
+ mean_T = np.asarray(mean_T).reshape(-1)
1321
+ if mean_T.size not in (1, grid_size):
1322
+ err_msg = f"{fname}: 'mean' parameter for T (in `params_T`) has incompatible size"
1323
+ if logger: logger.error(err_msg)
1324
+ raise SrfError(err_msg)
1325
+
1326
+ # Set var_T (as array) from params_T, if given
1327
+ var_T = None
1328
+ if 'var' in params_T.keys():
1329
+ var_T = params_T['var']
1330
+ if var_T is not None:
1331
+ if callable(var_T):
1332
+ if d == 1:
1333
+ var_T = var_T(xi).reshape(-1) # replace function 'var_T' by its evaluation on the grid
1334
+ elif d == 2:
1335
+ var_T = var_T(xxi, yyi).reshape(-1) # replace function 'var_T' by its evaluation on the grid
1336
+ elif d == 3:
1337
+ var_T = var_T(xxi, yyi, zzi).reshape(-1) # replace function 'var_T' by its evaluation on the grid
1338
+ else:
1339
+ var_T = np.asarray(var_T).reshape(-1)
1340
+ if var_T.size not in (1, grid_size):
1341
+ err_msg = f"{fname}: 'var' parameter for T (in `params_T`) has incompatible size"
1342
+ if logger: logger.error(err_msg)
1343
+ raise SrfError(err_msg)
1344
+
1345
+ # Initialize dictionary params_Y
1346
+ if params_Y is None:
1347
+ params_Y = {}
1348
+
1349
+ # Set mean_Y from params_Y (if given, and check if it is a unique value)
1350
+ mean_Y = None
1351
+ if 'mean' in params_Y.keys():
1352
+ mean_Y = params_Y['mean']
1353
+ if callable(mean_Y):
1354
+ err_msg = f"{fname}: 'mean' parameter for Y (in `params_Y`) must be a unique value (float) if given"
1355
+ if logger: logger.error(err_msg)
1356
+ raise SrfError(err_msg)
1357
+
1358
+ else:
1359
+ mean_Y = np.asarray(mean_Y, dtype='float').reshape(-1)
1360
+ if mean_Y.size != 1:
1361
+ err_msg = f"{fname}: 'mean' parameter for Y (in `params_Y`) must be a unique value (float) if given"
1362
+ if logger: logger.error(err_msg)
1363
+ raise SrfError(err_msg)
1364
+
1365
+ mean_Y = mean_Y[0]
1366
+
1367
+ # Check var_Y from params_Y
1368
+ if 'var' in params_Y.keys() and params_Y['var'] is not None:
1369
+ err_msg = f"{fname}: 'var' parameter for Y (in `params_Y`) must be `None`"
1370
+ if logger: logger.error(err_msg)
1371
+ raise SrfError(err_msg)
1372
+
1373
+ # Check input for distribution transform
1374
+ if target_distrib is None:
1375
+ if initial_distrib is not None and verbose > 0:
1376
+ if logger:
1377
+ logger.warning(f'{fname}: target distribution not handled (`initial_distrib` ignored) because `target_distrib` is not given (`None`)')
1378
+ else:
1379
+ print(f'{fname}: WARNING: target distribution not handled (`initial_distrib` ignored) because `target_distrib` is not given (`None`)')
1380
+ else:
1381
+ if mean_T.size != 1:
1382
+ err_msg = f'{fname}: target distribution cannot be handled with non-stationary mean for T (in `params_T`)'
1383
+ if logger: logger.error(err_msg)
1384
+ raise SrfError(err_msg)
1385
+
1386
+ if x is not None:
1387
+ if initial_distrib is None:
1388
+ if 'mean' not in params_Y.keys():
1389
+ err_msg = f"{fname}: target distribution cannot be handled (cannot set `initial_distrib`: 'mean' for Y must be specified (in `params_Y`)"
1390
+ if logger: logger.error(err_msg)
1391
+ raise SrfError(err_msg)
1392
+
1393
+ else:
1394
+ if t is not None:
1395
+ ind = np.where(t==mean_T[0])[0]
1396
+ else:
1397
+ ind = []
1398
+ if len(ind) == 0:
1399
+ err_msg = f'{fname}: target distribution cannot be handled (cannot set `initial_distrib`: value of mean(T) should be specified in `t`)'
1400
+ if logger: logger.error(err_msg)
1401
+ raise SrfError(err_msg)
1402
+
1403
+ # Number of realization(s)
1404
+ nreal = int(nreal) # cast to int if needed
1405
+
1406
+ if nreal <= 0:
1407
+ if full_output:
1408
+ if verbose > 0:
1409
+ if logger:
1410
+ logger.warning(f'{fname}: `nreal` <= 0: `None`, `None`, `None` is returned')
1411
+ else:
1412
+ print(f'{fname}: WARNING: `nreal` <= 0: `None`, `None`, `None` is returned')
1413
+ return None, None, None
1414
+ else:
1415
+ if verbose > 0:
1416
+ if logger:
1417
+ logger.warning(f'{fname}: `nreal` <= 0: `None` is returned')
1418
+ else:
1419
+ print(f'{fname}: WARNING: `nreal` <= 0: `None` is returned')
1420
+ return None
1421
+
1422
+ # Note: format of data (x, v) not checked !
1423
+
1424
+ if x is None:
1425
+ if v is not None:
1426
+ err_msg = f'{fname}: `x` is not given (`None`) but `v` is given (not `None`)'
1427
+ if logger: logger.error(err_msg)
1428
+ raise SrfError(err_msg)
1429
+
1430
+ # Preparation for unconditional case
1431
+ # Set mean_Y
1432
+ if mean_Y is None:
1433
+ mean_Y = 0.0
1434
+ #
1435
+ # Preparation for distribution transform
1436
+ if target_distrib is None:
1437
+ # no distribution transform
1438
+ distrib_transf = 0
1439
+ else:
1440
+ distrib_transf = 1
1441
+ if initial_distrib is None:
1442
+ if t is not None:
1443
+ ind = np.where(t==mean_T[0])[0]
1444
+ else:
1445
+ ind = []
1446
+ if len(ind):
1447
+ y_mean_T = yt[ind[0]]
1448
+ cov_T_0 = cov_model_T.func()(np.zeros(d))[0]
1449
+ cov_Y_0 = cov_model_Y.func()(0.)[0]
1450
+ std_Y_0 = np.sqrt(cov_Y_0)
1451
+ initial_distrib = compute_distrib_Z_given_Y_of_mean_T(
1452
+ np.linspace(min(y_mean_T, mean_Y)-5.*std_Y_0, max(y_mean_T, mean_Y)+5.*std_Y_0, 501),
1453
+ cov_model_Y, mean_Y=mean_Y, y_mean_T=y_mean_T, cov_T_0=cov_T_0,
1454
+ fstd=4.5, nint=2001, assume_sorted=True
1455
+ )
1456
+ compute_initial_distrib = False
1457
+ else:
1458
+ # initial_distrib will be computed for each realization
1459
+ cov_T_0 = cov_model_T.func()(np.zeros(d))[0]
1460
+ cov_Y_0 = cov_model_Y.func()(0.)[0]
1461
+ std_Y_0 = np.sqrt(cov_Y_0)
1462
+ compute_initial_distrib = True
1463
+ else:
1464
+ compute_initial_distrib = False
1465
+ #
1466
+ else:
1467
+ # Preparation for conditional case
1468
+ if v is None:
1469
+ err_msg = f'{fname}: `x` is given (not `None`) but `v` is not given (`None`)'
1470
+ if logger: logger.error(err_msg)
1471
+ raise SrfError(err_msg)
1472
+
1473
+ x = np.asarray(x, dtype='float').reshape(-1, d) # cast in d-dimensional array if needed
1474
+ v = np.asarray(v, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
1475
+ if len(v) != x.shape[0]:
1476
+ err_msg = f'{fname}: length of `v` is not valid'
1477
+ if logger: logger.error(err_msg)
1478
+ raise SrfError(err_msg)
1479
+
1480
+ # Number of conditioning points
1481
+ npt = x.shape[0]
1482
+
1483
+ # Get index in mean_T for each conditioning point
1484
+ x_mean_T_grid_ind = None
1485
+ if mean_T.size == 1:
1486
+ x_mean_T_grid_ind = np.zeros(npt, dtype='int')
1487
+ else:
1488
+ indc_f = (x-origin)/spacing
1489
+ indc = indc_f.astype(int)
1490
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
1491
+ if d == 1:
1492
+ x_mean_T_grid_ind = 1 * indc[:, 0] # multiplying by 1 makes a copy of the array !
1493
+ elif d == 2:
1494
+ x_mean_T_grid_ind = indc[:, 0] + dimension[0] * indc[:, 1]
1495
+ elif d == 3:
1496
+ x_mean_T_grid_ind = indc[:, 0] + dimension[0] * (indc[:, 1] + dimension[1] * indc[:, 2])
1497
+
1498
+ # Get index in var_T (if not None) for each conditioning point
1499
+ if var_T is not None:
1500
+ if var_T.size == 1:
1501
+ x_var_T_grid_ind = np.zeros(npt, dtype='int')
1502
+ else:
1503
+ if x_mean_T_grid_ind is not None:
1504
+ x_var_T_grid_ind = x_mean_T_grid_ind
1505
+ else:
1506
+ indc_f = (x-origin)/spacing
1507
+ indc = indc_f.astype(int)
1508
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
1509
+ if d == 1:
1510
+ x_var_T_grid_ind = 1 * indc[:, 0] # multiplying by 1 makes a copy of the array !
1511
+ elif d == 2:
1512
+ x_var_T_grid_ind = indc[:, 0] + dimension[0] * indc[:, 1]
1513
+ elif d == 3:
1514
+ x_var_T_grid_ind = indc[:, 0] + dimension[0] * (indc[:, 1] + dimension[1] * indc[:, 2])
1515
+
1516
+ # Get covariance function for T and Y
1517
+ cov_func_T = cov_model_T.func() # covariance function
1518
+ cov_func_Y = cov_model_Y.func() # covariance function
1519
+
1520
+ # Get evaluation of covariance function for T and Y at 0
1521
+ cov0_T = cov_func_T(np.zeros(d))
1522
+ cov0_Y = cov_func_Y(np.zeros(1))
1523
+
1524
+ # Set mean_Y
1525
+ if mean_Y is None:
1526
+ mean_Y = np.mean(v)
1527
+
1528
+ # Preparation for distribution transform
1529
+ if target_distrib is None:
1530
+ # no distribution transform
1531
+ distrib_transf = 0
1532
+ else:
1533
+ distrib_transf = 1
1534
+ if initial_distrib is None:
1535
+ if t is not None:
1536
+ ind = np.where(t==mean_T[0])[0]
1537
+ else:
1538
+ ind = []
1539
+ if len(ind):
1540
+ y_mean_T = yt[ind[0]]
1541
+ cov_T_0 = cov0_T[0]
1542
+ cov_Y_0 = cov0_Y[0]
1543
+ std_Y_0 = np.sqrt(cov_Y_0)
1544
+ initial_distrib = compute_distrib_Z_given_Y_of_mean_T(
1545
+ np.linspace(min(y_mean_T, mean_Y)-5.*std_Y_0, max(y_mean_T, mean_Y)+5.*std_Y_0, 501),
1546
+ cov_model_Y, mean_Y=mean_Y, y_mean_T=y_mean_T, cov_T_0=cov_T_0,
1547
+ fstd=4.5, nint=2001, assume_sorted=True
1548
+ )
1549
+ else:
1550
+ distrib_transf = 0
1551
+
1552
+ if distrib_transf:
1553
+ # Transform the conditioning data value
1554
+ v = initial_distrib.ppf(target_distrib.cdf(v))
1555
+
1556
+ # Set kriging matrix for T (mat_T) of order npt, "over every conditioning point"
1557
+ mat_T = np.ones((npt, npt))
1558
+ for i in range(npt-1):
1559
+ # lag between x[i] and x[j], j=i+1, ..., npt-1
1560
+ h = x[(i+1):] - x[i]
1561
+ cov_h_T = cov_func_T(h)
1562
+ mat_T[i, (i+1):npt] = cov_h_T
1563
+ mat_T[(i+1):npt, i] = cov_h_T
1564
+ mat_T[i, i] = cov0_T
1565
+
1566
+ mat_T[-1,-1] = cov0_T
1567
+
1568
+ if var_T is not None:
1569
+ varUpdate = np.sqrt(var_T[x_var_T_grid_ind]/cov0_T)
1570
+ mat_T = varUpdate*(mat_T.T*varUpdate).T
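+ # The line above rescales the covariance matrix for local variances: with
+ # D = diag(sqrt(var_T(x_i)/cov0_T)), the broadcasting computes D * mat_T * D,
+ # i.e. entry (i, j) becomes varUpdate[i] * mat_T[i, j] * varUpdate[j]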
1571
+
1572
+ # Initialize
1573
+ # - npt_ext: total number of conditioning points for Y, "point T(x) + additional constraint t"
1574
+ # - v_T: values of T(x) (that are defined later) followed by the values t of the additional constraint
1575
+ # - v_ext: values for Y at "point T(x) + additional constraint (t)"
1576
+ # - mat_Y: kriging matrix for Y of order npt_ext, over "point T(x) + additional constraint t"
1577
+ if t is None:
1578
+ npt_ext = npt
1579
+ v_T = np.zeros(npt)
1580
+ v_ext = v
1581
+ mat_Y = np.ones((npt_ext, npt_ext))
1582
+ else:
1583
+ npt_ext = npt + len(t)
1584
+ v_T = np.hstack((np.zeros(npt), t))
1585
+ v_ext = np.hstack((v, yt))
1586
+ mat_Y = np.ones((npt_ext, npt_ext))
1587
+ for i in range(len(t)-1):
1588
+ # lag between t[i] and t[j], j=i+1, ..., len(t)-1
1589
+ h = t[(i+1):] - t[i]
1590
+ cov_h_Y = cov_func_Y(h)
1591
+ k = i + npt
1592
+ mat_Y[k, (k+1):] = cov_h_Y
1593
+ mat_Y[(k+1):, k] = cov_h_Y
1594
+ #mat_Y[k, k] = cov0_Y
1595
+
1596
+ #mat_Y[-1,-1] = cov0_Y
1597
+ for i in range(npt_ext):
1598
+ mat_Y[i, i] = cov0_Y
1599
+
1600
+ if npt_ext <= 1:
1601
+ mh_iter = 0 # unnecessary to apply Metropolis update !
1602
+
1603
+ # Set (again if given) default parameter 'mean' and 'var' for T, and 'mean' for Y
1604
+ params_T['mean'] = mean_T
1605
+ params_T['var'] = var_T
1606
+ params_Y['mean'] = mean_Y
1607
+
1608
+ # Set default parameter 'verbose' for params_T and params_Y
1609
+ if 'verbose' not in params_T.keys():
1610
+ params_T['verbose'] = 0
1611
+ # params_T['verbose'] = verbose
1612
+ if 'verbose' not in params_Y.keys():
1613
+ params_Y['verbose'] = 0
1614
+ # params_Y['verbose'] = verbose
1615
+
1616
+ # Initialization for output
1617
+ Z = []
1618
+ if full_output:
1619
+ T = []
1620
+ Y = []
1621
+
1622
+ for ireal in range(nreal):
1623
+ # Generate ireal-th realization
1624
+ if verbose > 1:
1625
+ if logger:
1626
+ logger.info(f'{fname}: simulation {ireal+1} of {nreal}...')
1627
+ else:
1628
+ print(f'{fname}: simulation {ireal+1} of {nreal}...')
1629
+ for ntry in range(ntry_max):
1630
+ sim_ok = True
1631
+ Y_cond_aggregation = False
1632
+ if verbose > 2 and ntry > 0:
1633
+ if logger:
1634
+ logger.info(f'{fname}: ... new trial ({ntry+1} of {ntry_max}) for simulation {ireal+1} of {nreal}...')
1635
+ else:
1636
+ print(f'{fname}: ... new trial ({ntry+1} of {ntry_max}) for simulation {ireal+1} of {nreal}...')
1637
+ if x is None:
1638
+ # Unconditional case
1639
+ # ------------------
1640
+ # Generate T (one real)
1641
+ try:
1642
+ sim_T = multiGaussian.multiGaussianRun(
1643
+ cov_model_T, dimension, spacing, origin,
1644
+ mode='simulation', algo=algo_T, output_mode='array',
1645
+ **params_T, nreal=1, logger=logger)
1646
+ except:
1647
+ sim_ok = False
1648
+ if verbose > 2:
1649
+ if logger:
1650
+ logger.info(f'{fname}: ... simulation of T failed')
1651
+ else:
1652
+ print(f'{fname}: ... simulation of T failed')
1653
+ continue
1654
+ # except Exception as exc:
1655
+ # err_msg = f'{fname}: simulation of T failed'
1656
+ # if logger: logger.error(err_msg)
1657
+ # raise SrfError(err_msg) from exc
1658
+
1659
+ # -> nd-array of shape
1660
+ # (1, dimension) (for T in 1D)
1661
+ # (1, dimension[1], dimension[0]) (for T in 2D)
1662
+ # (1, dimension[2], dimension[1], dimension[0]) (for T in 3D)
1663
+
1664
+ # Set origin and dimension for Y
1665
+ min_T = np.min(sim_T)
1666
+ max_T = np.max(sim_T)
1667
+ if t is not None:
1668
+ min_T = min(t.min(), min_T)
1669
+ max_T = max(t.max(), max_T)
1670
+ min_T = min_T - 0.5 * spacing_Y
1671
+ max_T = max_T + 0.5 * spacing_Y
1672
+ dimension_Y = int(np.ceil((max_T - min_T)/spacing_Y))
1673
+ origin_Y = min_T - 0.5*(dimension_Y*spacing_Y - (max_T - min_T))
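+ # The 1D grid for Y covers [min_T, max_T] (already padded by half a cell on
+ # each side); origin_Y centers the grid so that the leftover length
+ # dimension_Y*spacing_Y - (max_T - min_T) is split equally at both ends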
1674
+
1675
+ # Generate Y conditional to possible additional constraint (t, yt) (one real)
1676
+ try:
1677
+ sim_Y = multiGaussian.multiGaussianRun(
1678
+ cov_model_Y, dimension_Y, spacing_Y, origin_Y, x=t, v=yt,
1679
+ mode='simulation', algo=algo_Y, output_mode='array',
1680
+ **params_Y, nreal=1, logger=logger)
1681
+ except:
1682
+ sim_ok = False
1683
+ if verbose > 2:
1684
+ if logger:
1685
+ logger.info(f'{fname}: ... simulation of Y failed')
1686
+ else:
1687
+ print(f'{fname}: ... simulation of Y failed')
1688
+ continue
1689
+ # except Exception as exc:
1690
+ # err_msg = f'{fname}: simulation of Y failed'
1691
+ # if logger: logger.error(err_msg)
1692
+ # raise SrfError(err_msg) from exc
1693
+
1694
+ # -> 2d-array of shape (1, dimension_Y)
1695
+
1696
+ if distrib_transf:
1697
+ if compute_initial_distrib:
1698
+ # Compute initial_distrib
1699
+ # (approximately based on mean(T) and y_mean_T)
1700
+ # print('... computing initial_distrib ...')
1701
+ sim_T_mean = sim_T.reshape(-1).mean()
1702
+ # Compute
1703
+ # ind: node index (nearest node),
1704
+ # rounded to lower index if between two grid nodes and index is positive
1705
+ ind_f = (sim_T_mean - origin_Y)/spacing_Y
1706
+ ind = ind_f.astype(int)
1707
+ ind = ind - 1 * np.all((ind == ind_f, ind > 0), axis=0)
1708
+ y_mean_T = sim_Y[0][ind]
1709
+ #y_mean_T = sim_Y[0][np.floor((sim_T_mean - origin_Y)/spacing_Y).astype(int)]
1710
+ initial_distrib = compute_distrib_Z_given_Y_of_mean_T(
1711
+ np.linspace(min(y_mean_T, mean_Y)-5.*std_Y_0, max(y_mean_T, mean_Y)+5.*std_Y_0, 501),
1712
+ cov_model_Y, mean_Y=mean_Y, y_mean_T=y_mean_T, cov_T_0=cov_T_0,
1713
+ fstd=4.5, nint=2001, assume_sorted=True
1714
+ )
1715
+ #
1716
+ # (Back-)transform sim_Y value
1717
+ sim_Y = target_distrib.ppf(initial_distrib.cdf(sim_Y))
1718
+ #
1719
+ else:
1720
+ # Conditional case
1721
+ # ----------------
1722
+ # Initialize: unconditional simulation of T at x (values in v_T)
1723
+ ind = np.random.permutation(npt)
1724
+ for j, k in enumerate(ind):
1725
+ # Simulate value at x[k] (= x[ind[j]]), conditionally to the previous ones
1726
+ # Solve the kriging system (for T)
1727
+ try:
1728
+ w = np.linalg.solve(
1729
+ mat_T[ind[:j], :][:, ind[:j]], # kriging matrix
1730
+ mat_T[ind[:j], ind[j]], # second member
1731
+ )
1732
+ except:
1733
+ sim_ok = False
1734
+ break
1735
+
1736
+ # Mean (kriged) value at x[k]
1737
+ mu_T_k = mean_T[x_mean_T_grid_ind[k]] + (v_T[ind[:j]] - mean_T[x_mean_T_grid_ind[ind[:j]]]).dot(w)
1738
+ # Standard deviation (of kriging) at x[k]
1739
+ std_T_k = np.sqrt(np.maximum(0, cov0_T - np.dot(w, mat_T[ind[:j], ind[j]])))
1740
+ # Draw value in N(mu_T_k, std_T_k^2)
1741
+ v_T[k] = np.random.normal(loc=mu_T_k, scale=std_T_k)
1742
+
1743
+ if not sim_ok:
1744
+ sim_ok = False
1745
+ if verbose > 2:
1746
+ if logger:
1747
+ logger.info(f'{fname}: ... cannot solve kriging system (for T, initialization)')
1748
+ else:
1749
+ print(f'{fname}: ... cannot solve kriging system (for T, initialization)')
1750
+ continue
1751
+
1752
+ # Updated kriging matrix for Y (mat_Y) according to value in v_T[0:npt]
1753
+ for i in range(npt-1):
1754
+ # lag between v_T[i] and v_T[j], j=i+1, ..., npt-1
1755
+ h = v_T[(i+1):npt] - v_T[i]
1756
+ cov_h_Y = cov_func_Y(h)
1757
+ mat_Y[i, (i+1):npt] = cov_h_Y
1758
+ mat_Y[(i+1):npt, i] = cov_h_Y
1759
+ # mat_Y[i, i] = cov0_Y
1760
+
1761
+ for i, k in enumerate(range(npt, npt_ext)):
1762
+ # lag between t[i] and v_T[j], j=0, ..., npt-1
1763
+ h = v_T[0:npt] - t[i]
1764
+ cov_h_Y = cov_func_Y(h)
1765
+ mat_Y[k, 0:npt] = cov_h_Y
1766
+ mat_Y[0:npt, k] = cov_h_Y
1767
+ # mat_Y[i, i] = cov0_Y
1768
+
1769
+ # mat_Y[-1,-1] = cov0_Y
1770
+
1771
+ # Update simulated values v_T at x using the Metropolis-Hastings (MH) algorithm
1772
+ for nit in range(mh_iter):
1773
+ if verbose > 3:
1774
+ if logger:
1775
+ logger.info(f'{fname}: ... sim {ireal+1} of {nreal}: MH iter {nit+1} of {mh_iter}...')
1776
+ else:
1777
+ print(f'{fname}: ... sim {ireal+1} of {nreal}: MH iter {nit+1} of {mh_iter}...')
1778
+ ind = np.random.permutation(npt)
1779
+ for k in ind:
1780
+ # Sequence of indexes without k
1781
+ indmat = np.hstack((np.arange(k), np.arange(k+1, npt)))
1782
+ # Simulate possible new value v_T_k_new at x[k], conditionally to all the other ones
1783
+ #
1784
+ # Solve the kriging system for T
1785
+ try:
1786
+ w = np.linalg.solve(
1787
+ mat_T[indmat, :][:, indmat], # kriging matrix
1788
+ mat_T[indmat, k], # second member
1789
+ )
1790
+ except:
1791
+ sim_ok = False
1792
+ if verbose > 2:
1793
+ if logger:
1794
+ logger.info(f'{fname}: ... cannot solve kriging system (for T)')
1795
+ else:
1796
+ print(f'{fname}: ... cannot solve kriging system (for T)')
1797
+ break
1798
+ #
1799
+ # Mean (kriged) value at x[k]
1800
+ mu_T_k = mean_T[x_mean_T_grid_ind[k]] + (v_T[indmat] - mean_T[x_mean_T_grid_ind[indmat]]).dot(w)
1801
+ # Standard deviation (of kriging) at x[k]
1802
+ std_T_k = np.sqrt(np.maximum(0, cov0_T - np.dot(w, mat_T[indmat, k])))
1803
+ # Draw value in N(mu, std^2)
1804
+ v_T_k_new = np.random.normal(loc=mu_T_k, scale=std_T_k)
1805
+ #
1806
+ # Compute MH quotient defined as
1807
+ # prob(Y[v_T_k_new] = v[k] | Y[indmat] = v[indmat], Y[t] = yt) / prob(Y[v_T[k]] = v[k] | Y[indmat] = v[indmat], Y[t] = yt)
1808
+ # (where Y[t]=yt are the possible additional constraint)
1809
+ #
1810
+ # New lag from v_T_k_new and corresponding covariance for Y
1811
+ h_k_new = v_T_k_new - np.hstack((v_T[:k], v_T_k_new, v_T[k+1:]))
1812
+ cov_h_Y_k_new = cov_func_Y(h_k_new)
1813
+ # Solve the kriging system for Y for simulation at v_T[k] and at v_T_k_new
1814
+ indmat_ext = np.hstack((indmat, np.arange(npt, npt_ext)))
1815
+ try:
1816
+ w = np.linalg.solve(
1817
+ mat_Y[indmat_ext, :][:, indmat_ext], # kriging matrix
1818
+ np.vstack((mat_Y[indmat_ext, k], cov_h_Y_k_new[indmat_ext])).T # both second members
1819
+ )
1820
+ except:
1821
+ sim_ok = False
1822
+ if verbose > 2:
1823
+ if logger:
1824
+ logger.info(f'{fname}: ... cannot solve kriging system (for Y)')
1825
+ else:
1826
+ print(f'{fname}: ... cannot solve kriging system (for Y)')
1827
+ break
1828
+ # Mean (kriged) values at v_T[k] and v_T_k_new
1829
+ mu_Y_k = mean_Y + (v_ext[indmat_ext] - mean_Y).dot(w) # mu_k of shape(2, )
1830
+ # Variance (of kriging) at v_T[k] and v_T_k_new
1831
+ var_Y_k = np.maximum(1.e-20, cov0_Y - np.array([np.dot(w[:,0], mat_Y[indmat_ext, k]), np.dot(w[:,1], cov_h_Y_k_new[indmat_ext])]))
1832
+ # Set minimal variance to 1.e-20 to avoid division by zero
1833
+ #
1834
+ # MH quotient is
1835
+ # phi_{mean=mu_Y_k[1], var=var_Y_k[1]}(v[k]) / phi_{mean=mu_Y_k[0], var=var_Y_k[0]}(v[k])
1836
+ # where phi_{mean, var} is the pdf of the normal law of given mean and var
1837
+ # To avoid overflow in exp, compute log of mh quotient...
1838
+ log_mh_quotient = 0.5 * (np.log(var_Y_k[0]) + (v[k]-mu_Y_k[0])**2/var_Y_k[0] - np.log(var_Y_k[1]) - (v[k]-mu_Y_k[1])**2/var_Y_k[1])
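+ # (with phi_{mu, var}(v) = exp(-(v-mu)^2/(2*var)) / sqrt(2*pi*var), one gets
+ # log[phi_{mu1, var1}(v) / phi_{mu0, var0}(v)]
+ # = 0.5*(log(var0) - log(var1) + (v-mu0)^2/var0 - (v-mu1)^2/var1),
+ # which is the expression above with (mu0, var0) = (mu_Y_k[0], var_Y_k[0])
+ # and (mu1, var1) = (mu_Y_k[1], var_Y_k[1]))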
1839
+ if log_mh_quotient >= 0.0 or np.random.random() < np.exp(log_mh_quotient):
1840
+ # Accept new value v_T_new at x[k]
1841
+ v_T[k] = v_T_k_new
1842
+ # Update kriging matrix for Y
1843
+ mat_Y[k,:] = cov_h_Y_k_new
1844
+ mat_Y[:,k] = cov_h_Y_k_new
1845
+ if not sim_ok:
1846
+ break
1847
+
1848
+ if not sim_ok:
1849
+ continue
1850
+
1851
+ # Generate T conditional to (x, v_T[0:npt]) (one real)
1852
+ try:
1853
+ sim_T = multiGaussian.multiGaussianRun(
1854
+ cov_model_T, dimension, spacing, origin, x=x, v=v_T[:npt],
1855
+ mode='simulation', algo=algo_T, output_mode='array',
1856
+ **params_T, nreal=1, logger=logger)
1857
+ except:
1858
+ sim_ok = False
1859
+ if verbose > 2:
1860
+ if logger:
1861
+ logger.info(f'{fname}: ... conditional simulation of T failed')
1862
+ else:
1863
+ print(f'{fname}: ... conditional simulation of T failed')
1864
+ continue
1865
+ # except Exception as exc:
1866
+ # err_msg = f'{fname}: conditional simulation of T failed'
1867
+ # if logger: logger.error(err_msg)
1868
+ # raise SrfError(err_msg) from exc
1869
+
1870
+ # -> nd-array of shape
1871
+ # (1, dimension) (for T in 1D)
1872
+ # (1, dimension[1], dimension[0]) (for T in 2D)
1873
+ # (1, dimension[2], dimension[1], dimension[0]) (for T in 3D)
1874
+
1875
+ # Set origin and dimension for Y
1876
+ min_T = np.min(sim_T)
1877
+ max_T = np.max(sim_T)
1878
+ if t is not None:
1879
+ min_T = min(t.min(), min_T)
1880
+ max_T = max(t.max(), max_T)
1881
+ min_T = min_T - 0.5 * spacing_Y
1882
+ max_T = max_T + 0.5 * spacing_Y
1883
+ dimension_Y = int(np.ceil((max_T - min_T)/spacing_Y))
1884
+ origin_Y = min_T - 0.5*(dimension_Y*spacing_Y - (max_T - min_T))
1885
+
1886
+ # Compute
1887
+ # indc: node index of conditioning node (nearest node),
1888
+ # rounded to lower index if between two grid nodes and index is positive
1889
+ indc_f = (v_T-origin_Y)/spacing_Y
1890
+ indc = indc_f.astype(int)
1891
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
1892
+ indc_unique, indc_inv = np.unique(indc, return_inverse=True)
1893
+ if len(indc_unique) == len(indc):
1894
+ v_T_unique = v_T
1895
+ v_ext_unique = v_ext
1896
+ else:
1897
+ Y_cond_aggregation = True
1898
+ v_T_unique = np.array([v_T[indc_inv==j].mean() for j in range(len(indc_unique))])
1899
+ v_ext_unique = np.array([v_ext[indc_inv==j].mean() for j in range(len(indc_unique))])
1900
+
1901
+ # Generate Y conditional to (v_T, v_ext) (one real)
1902
+ try:
1903
+ sim_Y = multiGaussian.multiGaussianRun(
1904
+ cov_model_Y, dimension_Y, spacing_Y, origin_Y, x=v_T_unique, v=v_ext_unique,
1905
+ mode='simulation', algo=algo_Y, output_mode='array',
1906
+ **params_Y, nreal=1, logger=logger)
1907
+ except:
1908
+ sim_ok = False
1909
+ if verbose > 2:
1910
+ if logger:
1911
+ logger.info(f'{fname}: ... conditional simulation of Y failed')
1912
+ else:
1913
+ print(f'{fname}: ... conditional simulation of Y failed')
1914
+ continue
1915
+ # except Exception as exc:
1916
+ # err_msg = f'{fname}: conditional simulation of Y failed'
1917
+ # if logger: logger.error(err_msg)
1918
+ # raise SrfError(err_msg) from exc
1919
+
1920
+ # -> 2d-array of shape (1, dimension_Y)
1921
+
1922
+ if distrib_transf:
1923
+ # Back-transform sim_Y value
1924
+ sim_Y = target_distrib.ppf(initial_distrib.cdf(sim_Y))
1925
+
1926
+ # Generate Z (one real)
1927
+ # Compute
1928
+ # ind: node index (nearest node),
1929
+ # rounded to lower index if between two grid nodes and index is positive
1930
+ ind_f = (sim_T.reshape(-1) - origin_Y)/spacing_Y
1931
+ ind = ind_f.astype(int)
1932
+ ind = ind - 1 * np.all((ind == ind_f, ind > 0), axis=0)
1933
+ Z_real = sim_Y[0][ind]
1934
+ #Z_real = sim_Y[0][np.floor((sim_T.reshape(-1) - origin_Y)/spacing_Y).astype(int)]
1935
+ if vmin is not None and Z_real.min() < vmin:
1936
+ sim_ok = False
1937
+ if verbose > 2:
1938
+ if logger:
1939
+ logger.info(f'{fname}: ... specified minimal value not honoured')
1940
+ else:
1941
+ print(f'{fname}: ... specified minimal value not honoured')
1942
+ continue
1943
+ if vmax is not None and Z_real.max() > vmax:
1944
+ sim_ok = False
1945
+ if verbose > 2:
1946
+ if logger:
1947
+ logger.info(f'{fname}: ... specified maximal value not honoured')
1948
+ else:
1949
+ print(f'{fname}: ... specified maximal value not honoured')
1950
+ continue
1951
+
1952
+ if sim_ok:
1953
+ if Y_cond_aggregation and verbose > 0:
1954
+ if logger:
1955
+ logger.warning(f'{fname}: conditioning points for Y falling in the same grid cell have been aggregated (mean) (real index {ireal})')
1956
+ else:
1957
+ print(f'{fname}: WARNING: conditioning points for Y falling in the same grid cell have been aggregated (mean) (real index {ireal})')
1958
+ Z.append(Z_real)
1959
+ if full_output:
1960
+ T.append(sim_T[0])
1961
+ Y.append([dimension_Y, spacing_Y, origin_Y, sim_Y.reshape(dimension_Y)])
1962
+ break
1963
+
1964
+ # Get Z
1965
+ if verbose > 0 and len(Z) < nreal:
1966
+ if logger:
1967
+ logger.warning(f'{fname}: some realizations failed (missing)')
1968
+ else:
1969
+ print(f'{fname}: WARNING: some realizations failed (missing)')
1970
+
1971
+ Z = np.asarray(Z).reshape(len(Z), *np.atleast_1d(dimension)[::-1])
1972
+
1973
+ if full_output:
1974
+ T = np.asarray(T).reshape(len(T), *np.atleast_1d(dimension)[::-1])
1975
+ return Z, T, Y
1976
+ else:
1977
+ return Z
1978
+ # ----------------------------------------------------------------------------
1979
+
1980
+ # ----------------------------------------------------------------------------
1981
+ class Distrib (object):
1982
+ """
1983
+ Class defining a distribution by a pdf, cdf, and ppf.
1984
+ """
1985
+ def __init__(self, pdf=None, cdf=None, ppf=None):
1986
+ self.pdf = pdf
1987
+ self.cdf = cdf
1988
+ self.ppf = ppf
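+ # Example (a minimal sketch, for illustration only): any consistent
+ # pdf/cdf/ppf triple can be wrapped, e.g. a standard normal distribution:
+ # d = Distrib(pdf=stats.norm.pdf, cdf=stats.norm.cdf, ppf=stats.norm.ppf)
+ # d.ppf(d.cdf(1.2)) # -> 1.2 (up to numerical precision)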
1989
+ # ----------------------------------------------------------------------------
1990
+
1991
+ # ----------------------------------------------------------------------------
1992
+ def compute_distrib_Z_given_Y_of_mean_T(
1993
+ z, cov_model_Y,
1994
+ mean_Y=0.,
1995
+ y_mean_T=0.,
1996
+ cov_T_0=1.0,
1997
+ fstd=4.5,
1998
+ nint=2001,
1999
+ assume_sorted=False):
2000
+ """
2001
+ Computes the distribution of Z given Y(mean(T)), for a SRF Z = Y(T).
2002
+
2003
+ With a SRF Z = Y(T), compute the pdf, cdf and ppf (inverse cdf) of
2004
+ Z given Y(mean(T))=y_mean_T (applicable for a (large) ensemble of realizations).
2005
+
2006
+ The cdf is given by the equation (26) in the reference below. This equation
2007
+ requires expectations wrt. :math:`\\mathcal{N}(0, c\\_T\\_0)`, which are approximated
2008
+ using `nint` values in the interval :math:`\\pm fstd \\cdot \\sqrt{c\\_T\\_0}`.
2009
+
2010
+ Parameters
2011
+ ----------
2012
+ z : 1d-array of floats
2013
+ values at which the conditional cdf and pdf are evaluated before interpolation, e.g
2014
+ `numpy.linspace(z_min, z_max, n)` with given `z_min`, `z_max`, and `n`
2015
+
2016
+ cov_model_Y : :class:`geone.covModel.CovModel1D`
2017
+ covariance model for Y (coding process), in 1D
2018
+
2019
+ mean_Y : float, default: 0.0
2020
+ mean of Y
2021
+
2022
+ y_mean_T : float, default: 0.0
2023
+ imposed value for Y(mean(T))
2024
+
2025
+ cov_T_0 : float, default: 1.0
2026
+ covariance model of T (latent field, directing function) evaluated at 0
2027
+
2028
+ fstd : float, default: 4.5
2029
+ positive value used for computing approximation (see above)
2030
+
2031
+ nint : int, default: 2001
2032
+ positive integer used for computing approximation (see above)
2033
+
2034
+ assume_sorted : bool, default: False
2035
+ if `True`: `z` has to be an array of monotonically increasing values
2036
+
2037
+ Returns
2038
+ -------
2039
+ distrib : :class:`Distrib`
2040
+ distribution, where each attribute is a function (obtained by
2041
+ interpolation of its approximated evaluation at `z`):
2042
+
2043
+ - distrib.pdf: (func) pdf f_{Z|Y(mean(T))=y_mean_T}
2044
+ - distrib.cdf: (func) cdf F_{Z|Y(mean(T))=y_mean_T}
2045
+ - distrib.ppf: (func) inverse cdf
2046
+
2047
+ References
2048
+ ----------
2049
+ - J. Straubhaar, P. Renard (2024), \
2050
+ Exploring substitution random functions composed of stationary multi-Gaussian processes. \
2051
+ Stochastic Environmental Research and Risk Assessment, \
2052
+ `doi:10.1007/s00477-024-02662-x <https://doi.org/10.1007/s00477-024-02662-x>`_
2053
+ """
2054
+ # The cdf is given by (eq. 26 of the ref):
2055
+ # F_{Z|Y(mean(T))=y_mean_T}(z)
2056
+ # = P(Z < z | Y(mean(T)) = y_mean_T)
2057
+ # = E_{h~N(0, c_T(0))} [F(z)], F cdf of N(mean_Y + C_Y(h)/C_Y(0)*(y_mean_T-mean_Y), C_Y(0) - C_Y(h)**2/C_Y(0))
2058
+ # the pdf is given by:
2059
+ # f_{Z|Y(mean(T))=y_mean_T}(z)
2060
+ # = d/dz[P(Z < z | Y(mean(T)) = y_mean_T)]
2061
+ # = E_{h~N(0, c_T(0))} [f(z)], f pdf of N(mean_Y + C_Y(h)/C_Y(0)*(y_mean_T-mean_Y), C_Y(0) - C_Y(h)**2/C_Y(0))
2062
+ # where
2063
+ # C_Y: covariance function of the coding process Y
2064
+ # C_T: covariance function of the directing function T
2065
+ # The function F_{Z|Y(mean(T))=y_mean_T} and f_{Z|Y(mean(T))=y_mean_T} are set by interpolation of the evaluation at z
2066
+ # (can be np.linspace(z_min, z_max, n), i.e. at z_min + i * (z_max-z_min)/(n-1), i=0, ... n-1)
2067
+
2068
+ # Approximation is computed, using 'nint' values of h in the interval
2069
+ # +/-'fstd'*np.sqrt(c_T(0)) for the expectation wrt. N(0, c_T(0))
2070
+
2071
+ # fname = 'compute_distrib_Z_given_Y_of_mean_T'
2072
+
2073
+ std_T_0 = np.sqrt(cov_T_0)
2074
+ a = fstd*std_T_0
2075
+ h = np.linspace(-a, a, nint)
2076
+ h_weight = np.exp(-0.5*h**2/cov_T_0)
2077
+ h_weight = h_weight / h_weight.sum()
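+ # h_weight are normalized Gaussian weights: the sums over h below approximate
+ # the expectation E_{h~N(0, cov_T_0)}[.] by a weighted average over a regular
+ # grid of lags h in [-fstd*sqrt(cov_T_0), fstd*sqrt(cov_T_0)]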
2078
+
2079
+ z = np.asarray(z, dtype='float').reshape(-1)
2080
+
2081
+ cov_Y_h = cov_model_Y.func()(h)
2082
+ cov_Y_0 = cov_model_Y.func()(0.)
2083
+ pdf_value = np.array([np.sum(h_weight*stats.norm.pdf(zi, loc=mean_Y + cov_Y_h/cov_Y_0 * (y_mean_T - mean_Y), scale=np.maximum(np.sqrt(cov_Y_0-cov_Y_h**2/cov_Y_0), 1.e-20))) for zi in z])
2084
+ cdf_value = np.array([np.sum(h_weight*stats.norm.cdf(zi, loc=mean_Y + cov_Y_h/cov_Y_0 * (y_mean_T - mean_Y), scale=np.maximum(np.sqrt(cov_Y_0-cov_Y_h**2/cov_Y_0), 1.e-20))) for zi in z])
2085
+
2086
+ pdf = interp1d(z, pdf_value, assume_sorted=assume_sorted, bounds_error=False, fill_value=(0., 0.))
2087
+ cdf = interp1d(z, cdf_value, assume_sorted=assume_sorted, bounds_error=False, fill_value=(0., 1.))
2088
+ ppf = interp1d(cdf_value, z, assume_sorted=assume_sorted, bounds_error=False, fill_value=(z.min(), z.max()))
2089
+
2090
+ distrib = Distrib(pdf, cdf, ppf)
2091
+ return distrib
2092
+ # ----------------------------------------------------------------------------
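+ # Example of use (a minimal sketch; the covariance model constructor call is
+ # illustrative only, see `geone.covModel.CovModel1D` for the exact signature):
+ # cov_Y = gcm.CovModel1D(elem=[('exponential', dict(w=1.0, r=0.5))])
+ # z = np.linspace(-5.0, 5.0, 501)
+ # distrib = compute_distrib_Z_given_Y_of_mean_T(z, cov_Y, mean_Y=0.0, y_mean_T=0.5, cov_T_0=1.0, assume_sorted=True)
+ # p = distrib.cdf(0.0) # approximation of P(Z < 0 | Y(mean(T)) = 0.5)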
2093
+
2094
+ # ============================================================================
2095
+ # Tools for simulating continuous SRF with
2096
+ # - two multi-Gaussian simulation as directing function
2097
+ # - 2D multi-Gaussian simulation as coding process
2098
+ # ============================================================================
2099
+
2100
+ # ----------------------------------------------------------------------------
2101
+ def srf_bimg_mg(
2102
+ cov_model_T1, cov_model_T2, cov_model_Y,
2103
+ dimension, spacing=None, origin=None,
2104
+ spacing_Y=(0.001, 0.001),
2105
+ x=None, v=None,
2106
+ t=None, yt=None,
2107
+ vmin=None, vmax=None,
2108
+ algo_T1='fft', params_T1=None,
2109
+ algo_T2='fft', params_T2=None,
2110
+ algo_Y='fft', params_Y=None,
2111
+ mh_iter=100,
2112
+ ntry_max=1,
2113
+ nreal=1,
2114
+ full_output=True,
2115
+ verbose=1,
2116
+ logger=None):
2117
+ """
2118
+ Substitution Random Function (SRF) - multi-Gaussian + multi-Gaussian.
2119
+
2120
+ This function allows to generate continuous random fields in 1D, 2D, 3D, based on
2121
+ a SRF Z defined as
2122
+
2123
+ - Z(x) = Y(T1(x), T2(x))
2124
+
2125
+ where
2126
+
2127
+ - T1, T2 are the directing functions (independent), two multi-Gaussian random fields \
2128
+ (latent fields)
2129
+ - Y is the coding process, a 2D multi-Gaussian random field
2130
+
2131
+ Z and T1, T2 are fields in 1D, 2D or 3D.
2132
+
2133
+ Notes
2134
+ -----
2135
+ The module :mod:`multiGaussian` is used for the multi-Gaussian fields T1, T2 and Y.
2136
+
2137
+ Parameters
2138
+ ----------
2139
+ cov_model_T1 : :class:`geone.covModel.CovModel<d>D`
2140
+ covariance model for T1, in 1D or 2D or 3D;
2141
+ note: can be set to `None`; in this case, `algo_T1='deterministic'`
2142
+ is requiered and `params_T1['mean']` defines the field T1
2143
+
2144
+ cov_model_T2 : :class:`geone.covModel.CovModel<d>D`
2145
+ covariance model for T2, in 1D or 2D or 3D
2146
+ note: can be set to `None`; in this case, `algo_T2='deterministic'`
2147
+ is requiered and `params_T2['mean']` defines the field T2
2148
+
2149
+ cov_model_Y : :class:`geone.covModel.CovModel2D`
2150
+ covariance model for Y, in 2D
2151
+
2152
+ dimension : [sequence of] int(s)
2153
+ number of cells along each axis, for simulation in:
2154
+
2155
+ - 1D: `dimension=nx`
2156
+ - 2D: `dimension=(nx, ny)`
2157
+ - 3D: `dimension=(nx, ny, nz)`
2158
+
2159
+ spacing : [sequence of] float(s), optional
2160
+ cell size along each axis, for simulation in:
2161
+
2162
+ - 1D: `spacing=sx`
2163
+ - 2D: `spacing=(sx, sy)`
2164
+ - 3D: `spacing=(sx, sy, sz)`
2165
+
2166
+ by default (`None`): 1.0 along each axis
2167
+
2168
+ origin : [sequence of] float(s), optional
2169
+ origin of the grid ("corner of the first cell"), for simulation in:
2170
+
2171
+ - 1D: `origin=ox`
2172
+ - 2D: `origin=(ox, oy)`
2173
+ - 3D: `origin=(ox, oy, oz)`
2174
+
2175
+ by default (`None`): 0.0 along each axis
2176
+
2177
+ spacing_Y : sequence of 2 floats, default: (0.001, 0.001)
2178
+ two positive values, resolution of the 2D Y field, along the two
2179
+ dimensions (corresponding to T1 and T2), spacing between two adjacent
2180
+ cells in the two directions
2181
+
2182
+ x : array-like of floats, optional
2183
+ data points locations (float coordinates), for simulation in:
2184
+
2185
+ - 1D: 1D array-like of floats
2186
+ - 2D: 2D array-like of floats of shape (m, 2)
2187
+ - 3D: 2D array-like of floats of shape (m, 3)
2188
+
2189
+ note: if one point (m=1), a float in 1D, a 1D array of shape (2,) in 2D,
2190
+ a 1D array of shape (3,) in 3D, is accepted
2191
+
2192
+ v : 1d-array-like of floats, optional
2193
+ data values at `x` (`v[i]` is the data value at `x[i]`)
2194
+
2195
+ t : 2d-array of floats or sequence of 2 floats, optional
2196
+ values of (T1, T2) considered as conditioning points for Y (additional constraint);
2197
+ each row corresponding to one point;
2198
+ note: if only one point, a sequence of 2 floats is accepted
2199
+
2200
+ yt : 1d-array-like of floats, or float, optional
2201
+ value of Y at the conditioning point `t`
2202
+
2203
+ vmin : float, optional
2204
+ minimal value for Z (or Y); simulations are rejected if not honoured
2205
+
2206
+ vmax : float, optional
2207
+ maximal value for Z (or Y); simulations are rejected if not honoured
2208
+
2209
+ algo_T1 : str
2210
+ defines the algorithm used for generating multi-Gaussian field T1:
2211
+
2212
+ - 'fft' or 'FFT' (default): based on circulant embedding and FFT, \
2213
+ function called for <d>D (d = 1, 2, or 3): `geone.grf.grf<d>D`
2214
+ - 'classic' or 'CLASSIC': classic algorithm, based on the resolution \
2215
+ of kriging systems considering points in a search ellipsoid, function \
2216
+ called for <d>D (d = 1, 2, or 3): `geone.geosclassicinterface.simulate<d>D`
2217
+ - 'deterministic' or 'DETERMINISTIC': use a deterministic field defined \
2218
+ by `params_T1['mean']`
2219
+
2220
+ params_T1 : dict, optional
2221
+ keyword arguments (additional parameters) to be passed to the function
2222
+ corresponding to what is specified by the argument `algo_T1` (see the
2223
+ corresponding function for its keyword arguments), in particular the key
2224
+ 'mean' can be specified (set to value 0 if not specified)
2225
+
2226
+ algo_T2 : str
2227
+ defines the algorithm used for generating multi-Gaussian field T2:
2228
+
2229
+ - 'fft' or 'FFT' (default): based on circulant embedding and FFT, \
2230
+ function called for <d>D (d = 1, 2, or 3): `geone.grf.grf<d>D`
2231
+ - 'classic' or 'CLASSIC': classic algorithm, based on the resolution \
2232
+ of kriging systems considering points in a search ellipsoid, function \
2233
+ called for <d>D (d = 1, 2, or 3): `geone.geosclassicinterface.simulate<d>D`
2234
+ - 'deterministic' or 'DETERMINISTIC': use a deterministic field defined \
2235
+ by `params_T2['mean']`
2236
+
2237
+ params_T2 : dict, optional
2238
+ keyword arguments (additional parameters) to be passed to the function
2239
+ corresponding to what is specified by the argument `algo_T2` (see the
2240
+ corresponding function for its keyword arguments), in particular the key
2241
+ 'mean' can be specified (set to value 0 if not specified)
2242
+
2243
+ algo_Y : str
2244
+ defines the algorithm used for generating 2D multi-Gaussian field Y:
2245
+
2246
+ - 'fft' or 'FFT' (default): based on circulant embedding and FFT, \
2247
+ function called: :func:`geone.grf.grf2D`
2248
+ - 'classic' or 'CLASSIC': classic algorithm, based on the resolution \
2249
+ of kriging systems considering points in a search ellipsoid, function \
2250
+ called: :func:`geone.geosclassicinterface.simulate`
2251
+
2252
+ params_Y : dict, optional
2253
+ keyword arguments (additional parameters) to be passed to the function
2254
+ corresponding to what is specified by the argument `algo_Y` (see the
2255
+ corresponding function for its keyword arguments), in particular the key
2256
+ 'mean' can be specified (if not specified, set to the mean value of `v`
2257
+ if `v` is not `None`, set to 0 otherwise)
2258
+
2259
+ mh_iter : int, default: 100
2260
+ number of iterations of the Metropolis-Hastings algorithm, for conditional
2261
+ simulation only; note: used only if `x` or `t` is not `None`
2262
+
2263
+ ntry_max : int, default: 1
2264
+ number of tries per realization before giving up if something goes wrong
2265
+
2266
+ nreal : int, default: 1
2267
+ number of realization(s)
2268
+
2269
+ full_output : bool, default: True
2270
+ - if `True`: simulation(s) of Z, T1, T2, and Y are retrieved in output
2271
+ - if `False`: simulation(s) of Z only is retrieved in output
2272
+
2273
+ verbose : int, default: 1
2274
+ verbose mode, integer >=0, higher implies more display
2275
+
2276
+ logger : :class:`logging.Logger`, optional
2277
+ logger (see package `logging`)
2278
+ if specified, messages are written via `logger` (no print)
2279
+
2280
+ Returns
2281
+ -------
2282
+ Z : nd-array
2283
+ all realizations, `Z[k]` is the `k`-th realization:
2284
+
2285
+ - for 1D: `Z` of shape (nreal, nx), where nx = dimension
2286
+ - for 2D: `Z` of shape (nreal, ny, nx), where nx, ny = dimension
2287
+ - for 3D: `Z` of shape (nreal, nz, ny, nx), where nx, ny, nz = dimension
2288
+
2289
+ T1 : nd-array
2290
+ latent fields of all realizations, `T1[k]` for the `k`-th realization:
2291
+
2292
+ - for 1D: `T1` of shape (nreal, nx), where nx = dimension
2293
+ - for 2D: `T1` of shape (nreal, ny, nx), where nx, ny = dimension
2294
+ - for 3D: `T1` of shape (nreal, nz, ny, nx), where nx, ny, nz = dimension
2295
+
2296
+ returned if `full_output=True`
2297
+
2298
+ T2 : nd-array
2299
+ latent fields of all realizations, `T2[k]` for the `k`-th realization:
2300
+
2301
+ - for 1D: `T2` of shape (nreal, nx), where nx = dimension
2302
+ - for 2D: `T2` of shape (nreal, ny, nx), where nx, ny = dimension
2303
+ - for 3D: `T2` of shape (nreal, nz, ny, nx), where nx, ny, nz = dimension
2304
+
2305
+ returned if `full_output=True`
2306
+
2307
+ Y : list of length nreal
2308
+ 2D random fields of all realizations, `Y[k]` is a list of length 4 for
2309
+ the `k`-th realization:
2310
+
2311
+ - Y[k][0]: 1d-array of shape (2,): (Y_nx, Y_ny)
2312
+ - Y[k][1]: 1d-array of shape (2,): (Y_sx, Y_sy)
2313
+ - Y[k][2]: 1d-array of shape (2,): (Y_ox, Y_oy)
2314
+ - Y[k][3]: 2d-array of shape (Y_ny, Y_nx): values of Y[k]
2315
+
2316
+ returned if `full_output=True`
2317
+ """
2318
+ fname = 'srf_bimg_mg'
2319
+
2320
+ if algo_T1 not in ('fft', 'FFT', 'classic', 'CLASSIC', 'deterministic', 'DETERMINISTIC'):
2321
+ err_msg = f"{fname}: `algo_T1` invalid, should be 'fft' (default) or 'classic'"
2322
+ if logger: logger.error(err_msg)
2323
+ raise SrfError(err_msg)
2324
+
2325
+ if algo_T2 not in ('fft', 'FFT', 'classic', 'CLASSIC', 'deterministic', 'DETERMINISTIC'):
2326
+ err_msg = f"{fname}: `algo_T2` invalid, should be 'fft' (default) or 'classic'"
2327
+ if logger: logger.error(err_msg)
2328
+ raise SrfError(err_msg)
2329
+
2330
+ if algo_Y not in ('fft', 'FFT', 'classic', 'CLASSIC'):
2331
+ err_msg = f"{fname}: `algo_Y` invalid, should be 'fft' (default) or 'classic'"
2332
+ if logger: logger.error(err_msg)
2333
+ raise SrfError(err_msg)
2334
+
2335
+ # Ignore covariance model if 'algo' is deterministic for T1, T2
2336
+ if algo_T1 in ('deterministic', 'DETERMINISTIC'):
2337
+ cov_model_T1 = None
2338
+
2339
+ if algo_T2 in ('deterministic', 'DETERMINISTIC'):
2340
+ cov_model_T2 = None
2341
+
2342
+ # Set space dimension (of grid) according to covariance model for T1
2343
+ d = 0
2344
+ if cov_model_T1 is None:
2345
+ if algo_T1 not in ('deterministic', 'DETERMINISTIC'):
2346
+ err_msg = f"{fname}: `cov_model_T1` is `None`, then `algo_T1` must be 'deterministic'"
2347
+ if logger: logger.error(err_msg)
2348
+ raise SrfError(err_msg)
2349
+
2350
+ elif isinstance(cov_model_T1, gcm.CovModel1D):
2351
+ d = 1
2352
+ elif isinstance(cov_model_T1, gcm.CovModel2D):
2353
+ d = 2
2354
+ elif isinstance(cov_model_T1, gcm.CovModel3D):
2355
+ d = 3
2356
+ else:
2357
+ err_msg = f'{fname}: `cov_model_T1` invalid, should be a class `geone.covModel.CovModel1D`, `geone.covModel.CovModel2D` or `geone.covModel.CovModel3D`'
2358
+ if logger: logger.error(err_msg)
2359
+ raise SrfError(err_msg)
2360
+
2361
+ if cov_model_T2 is None:
2362
+ if algo_T2 not in ('deterministic', 'DETERMINISTIC'):
2363
+ err_msg = f"{fname}: `cov_model_T2` is `None`, then `algo_T2` must be 'deterministic'"
2364
+ if logger: logger.error(err_msg)
2365
+ raise SrfError(err_msg)
2366
+
2367
+ # if d == 0:
2368
+ # err_msg = f'{fname}: `cov_model_T1` and `cov_model_T2` are `None`, at least one covariance model is required'
2369
+ # if logger: logger.error(err_msg)
2370
+ # raise SrfError(err_msg)
2371
+
2372
+ elif (d == 1 and not isinstance(cov_model_T2, gcm.CovModel1D)) or (d == 2 and not isinstance(cov_model_T2, gcm.CovModel2D)) or (d == 3 and not isinstance(cov_model_T2, gcm.CovModel3D)):
2373
+ err_msg = f'{fname}: `cov_model_T1` and `cov_model_T2` not compatible (dimensions differ)'
2374
+ if logger: logger.error(err_msg)
2375
+ raise SrfError(err_msg)
2376
+
2377
+ if d == 0:
2378
+ # Set space dimension (of grid) according to 'dimension'
2379
+ if hasattr(dimension, '__len__'):
2380
+ d = len(dimension)
2381
+ else:
2382
+ d = 1
2383
+
2384
+ # Check argument 'dimension'
2385
+ if hasattr(dimension, '__len__') and len(dimension) != d:
2386
+ err_msg = f'{fname}: `dimension` of incompatible length'
2387
+ if logger: logger.error(err_msg)
2388
+ raise SrfError(err_msg)
2389
+
2390
+ if d == 1:
2391
+ grid_size = dimension
2392
+ else:
2393
+ grid_size = np.prod(dimension)
2394
+
2395
+ # Check (or set) argument 'spacing'
2396
+ if spacing is None:
2397
+ if d == 1:
2398
+ spacing = 1.0
2399
+ else:
2400
+ spacing = tuple(np.ones(d))
2401
+ else:
2402
+ if hasattr(spacing, '__len__') and len(spacing) != d:
2403
+ err_msg = f'{fname}: `spacing` of incompatible length'
2404
+ if logger: logger.error(err_msg)
2405
+ raise SrfError(err_msg)
2406
+
2407
+ # Check (or set) argument 'origin'
2408
+ if origin is None:
2409
+ if d == 1:
2410
+ origin = 0.0
2411
+ else:
2412
+ origin = tuple(np.zeros(d))
2413
+ else:
2414
+ if hasattr(origin, '__len__') and len(origin) != d:
2415
+ err_msg = f'{fname}: `origin` of incompatible length'
2416
+ if logger: logger.error(err_msg)
2417
+ raise SrfError(err_msg)
2418
+
2419
+ # if not cov_model_T1.is_stationary(): # prevent calculation if covariance model is not stationary
2420
+ # if verbose > 0:
2421
+ # print(f'ERROR ({fname}): `cov_model_T1` is not stationary')
2422
+
2423
+ # if not cov_model_T2.is_stationary(): # prevent calculation if covariance model is not stationary
2424
+ # if verbose > 0:
2425
+ # print(f'ERROR ({fname}): `cov_model_T2` is not stationary')
2426
+
2427
+ # Check covariance model for Y
2428
+ if not isinstance(cov_model_Y, gcm.CovModel2D):
2429
+ err_msg = f'{fname}: `cov_model_Y` invalid'
2430
+ if logger: logger.error(err_msg)
2431
+ raise SrfError(err_msg)
2432
+
2433
+ # elif not cov_model_Y.is_stationary(): # prevent calculation if covariance model is not stationary
2434
+ # err_msg = f'{fname}: `cov_model_Y` is not stationary'
2435
+ # if logger: logger.error(err_msg)
2436
+ # raise SrfError(err_msg)
2437
+
2438
+ # Check additional constraint t (conditioning point for (T1, T2)), yt (corresponding value for Y)
2439
+ if t is None:
2440
+ if yt is not None:
2441
+ err_msg = f'{fname}: `t` is not given (`None`) but `yt` is given (not `None`)'
2442
+ if logger: logger.error(err_msg)
2443
+ raise SrfError(err_msg)
2444
+
2445
+ else:
2446
+ if yt is None:
2447
+ err_msg = f'{fname}: `t` is given (not `None`) but `yt` is not given (`None`)'
2448
+ if logger: logger.error(err_msg)
2449
+ raise SrfError(err_msg)
2450
+
2451
+ t = np.asarray(t, dtype='float').reshape(-1, 2) # cast in 2-dimensional array if needed
2452
+ yt = np.asarray(yt, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
2453
+ if len(yt) != len(t):
2454
+ err_msg = f'{fname}: length of `yt` is not valid'
2455
+ if logger: logger.error(err_msg)
2456
+ raise SrfError(err_msg)
2457
+
2458
+ # Initialize dictionaries params_T1, params_T2
2459
+ if params_T1 is None:
2460
+ params_T1 = {}
2461
+ if params_T2 is None:
2462
+ params_T2 = {}
2463
+
2464
+ # Compute meshgrid over simulation domain if needed (see below)
2465
+ if ('mean' in params_T1.keys() and callable(params_T1['mean'])) or ('var' in params_T1.keys() and callable(params_T1['var'])) \
2466
+ or ('mean' in params_T2.keys() and callable(params_T2['mean'])) or ('var' in params_T2.keys() and callable(params_T2['var'])):
2467
+ if d == 1:
2468
+ xi = origin + spacing*(0.5+np.arange(dimension)) # x-coordinate of cell center
2469
+ elif d == 2:
2470
+ xi = origin[0] + spacing[0]*(0.5+np.arange(dimension[0])) # x-coordinate of cell center
2471
+ yi = origin[1] + spacing[1]*(0.5+np.arange(dimension[1])) # y-coordinate of cell center
2472
+ yyi, xxi = np.meshgrid(yi, xi, indexing='ij')
2473
+ elif d == 3:
2474
+ xi = origin[0] + spacing[0]*(0.5+np.arange(dimension[0])) # x-coordinate of cell center
2475
+ yi = origin[1] + spacing[1]*(0.5+np.arange(dimension[1])) # y-coordinate of cell center
2476
+ zi = origin[2] + spacing[2]*(0.5+np.arange(dimension[2])) # z-coordinate of cell center
2477
+ zzi, yyi, xxi = np.meshgrid(zi, yi, xi, indexing='ij')
2478
+
2479
+ # Set mean_T1 (as array) from params_T1
2480
+ if 'mean' not in params_T1.keys():
2481
+ mean_T1 = np.array([0.0])
2482
+ else:
2483
+ mean_T1 = params_T1['mean']
2484
+ if mean_T1 is None:
2485
+ mean_T1 = np.array([0.0])
2486
+ elif callable(mean_T1):
2487
+ if d == 1:
2488
+ mean_T1 = mean_T1(xi).reshape(-1) # replace function 'mean_T1' by its evaluation on the grid
2489
+ elif d == 2:
2490
+ mean_T1 = mean_T1(xxi, yyi).reshape(-1) # replace function 'mean_T1' by its evaluation on the grid
2491
+ elif d == 3:
2492
+ mean_T1 = mean_T1(xxi, yyi, zzi).reshape(-1) # replace function 'mean_T1' by its evaluation on the grid
2493
+ else:
2494
+ mean_T1 = np.asarray(mean_T1).reshape(-1)
2495
+ if mean_T1.size not in (1, grid_size):
2496
+ err_msg = f"{fname}: 'mean' parameter for T1 (in `params_T1`) has incompatible size"
2497
+ if logger: logger.error(err_msg)
2498
+ raise SrfError(err_msg)
2499
+
2500
+ # Set var_T1 (as array) from params_T1, if given
2501
+ var_T1 = None
2502
+ if 'var' in params_T1.keys():
2503
+ var_T1 = params_T1['var']
2504
+ if var_T1 is not None:
2505
+ if callable(var_T1):
2506
+ if d == 1:
2507
+ var_T1 = var_T1(xi).reshape(-1) # replace function 'var_T1' by its evaluation on the grid
2508
+ elif d == 2:
2509
+ var_T1 = var_T1(xxi, yyi).reshape(-1) # replace function 'var_T1' by its evaluation on the grid
2510
+ elif d == 3:
2511
+ var_T1 = var_T1(xxi, yyi, zzi).reshape(-1) # replace function 'var_T1' by its evaluation on the grid
2512
+ else:
2513
+ var_T1 = np.asarray(var_T1).reshape(-1)
2514
+ if var_T1.size not in (1, grid_size):
2515
+ err_msg = f"{fname}: 'var' parameter for T1 (in `params_T1`) has incompatible size"
2516
+ if logger: logger.error(err_msg)
2517
+ raise SrfError(err_msg)
2518
+
2519
+ # Set mean_T2 (as array) from params_T2
2520
+ if 'mean' not in params_T2.keys():
2521
+ mean_T2 = np.array([0.0])
2522
+ else:
2523
+ mean_T2 = params_T2['mean']
2524
+ if mean_T2 is None:
2525
+ mean_T2 = np.array([0.0])
2526
+ elif callable(mean_T2):
2527
+ if d == 1:
2528
+ mean_T2 = mean_T2(xi).reshape(-1) # replace function 'mean_T2' by its evaluation on the grid
2529
+ elif d == 2:
2530
+ mean_T2 = mean_T2(xxi, yyi).reshape(-1) # replace function 'mean_T2' by its evaluation on the grid
2531
+ elif d == 3:
2532
+ mean_T2 = mean_T2(xxi, yyi, zzi).reshape(-1) # replace function 'mean_T2' by its evaluation on the grid
2533
+ else:
2534
+ mean_T2 = np.asarray(mean_T2).reshape(-1)
2535
+ if mean_T2.size not in (1, grid_size):
2536
+ err_msg = f"{fname}: 'mean' parameter for T2 (in `params_T2`) has incompatible size"
2537
+ if logger: logger.error(err_msg)
2538
+ raise SrfError(err_msg)
2539
+
2540
+ # Set var_T2 (as array) from params_T2, if given
2541
+ var_T2 = None
2542
+ if 'var' in params_T2.keys():
2543
+ var_T2 = params_T2['var']
2544
+ if var_T2 is not None:
2545
+ if callable(var_T2):
2546
+ if d == 1:
2547
+ var_T2 = var_T2(xi).reshape(-1) # replace function 'var_T2' by its evaluation on the grid
2548
+ elif d == 2:
2549
+ var_T2 = var_T2(xxi, yyi).reshape(-1) # replace function 'var_T2' by its evaluation on the grid
2550
+ elif d == 3:
2551
+ var_T2 = var_T2(xxi, yyi, zzi).reshape(-1) # replace function 'var_T2' by its evaluation on the grid
2552
+ else:
2553
+ var_T2 = np.asarray(var_T2).reshape(-1)
2554
+ if var_T2.size not in (1, grid_size):
2555
+ err_msg = f"{fname}: 'var' parameter for T2 (in `params_T2`) has incompatible size"
2556
+ if logger: logger.error(err_msg)
2557
+ raise SrfError(err_msg)
2558
+
2559
+ # Initialize dictionary params_Y
2560
+ if params_Y is None:
2561
+ params_Y = {}
2562
+
2563
+ # Set mean_Y from params_Y (if given, and check if it is a unique value)
2564
+ mean_Y = None
2565
+ if 'mean' in params_Y.keys():
2566
+ mean_Y = params_Y['mean']
2567
+ if callable(mean_Y):
2568
+ err_msg = f"{fname}: 'mean' parameter for Y (in `params_Y`) must be a unique value (float) if given"
2569
+ if logger: logger.error(err_msg)
2570
+ raise SrfError(err_msg)
2571
+
2572
+ else:
2573
+ mean_Y = np.asarray(mean_Y, dtype='float').reshape(-1)
2574
+ if mean_Y.size != 1:
2575
+ err_msg = f"{fname}: 'mean' parameter for Y (in `params_Y`) must be a unique value (float) if given"
2576
+ if logger: logger.error(err_msg)
2577
+ raise SrfError(err_msg)
2578
+
2579
+ mean_Y = mean_Y[0]
2580
+
2581
+ # Check var_Y from params_Y
2582
+ if 'var' in params_Y.keys() and params_Y['var'] is not None:
2583
+ err_msg = f"{fname}: 'var' parameter for Y (in `params_Y`) must be `None`"
2584
+ if logger: logger.error(err_msg)
2585
+ raise SrfError(err_msg)
2586
+
2587
+ # Number of realization(s)
2588
+ nreal = int(nreal) # cast to int if needed
2589
+
2590
+ if nreal <= 0:
2591
+ if full_output:
2592
+ if verbose > 0:
2593
+ if logger:
2594
+ logger.warning(f'{fname}: `nreal` <= 0: `None`, `None`, `None` is returned')
2595
+ else:
2596
+ print(f'{fname}: WARNING: `nreal` <= 0: `None`, `None`, `None` is returned')
2597
+ return None, None, None
2598
+ else:
2599
+ if verbose > 0:
2600
+ if logger:
2601
+ logger.warning(f'{fname}: `nreal` <= 0: `None` is returned')
2602
+ else:
2603
+ print(f'{fname}: WARNING: `nreal` <= 0: `None` is returned')
2604
+ return None
2605
+
2606
+ # Note: format of data (x, v) not checked !
2607
+
2608
+ if x is None:
2609
+ if v is not None:
2610
+ err_msg = f'{fname}: `x` is not given (`None`) but `v` is given (not `None`)'
2611
+ if logger: logger.error(err_msg)
2612
+ raise SrfError(err_msg)
2613
+
2614
+ # Preparation for unconditional case
2615
+ # Set mean_Y
2616
+ if mean_Y is None:
2617
+ mean_Y = 0.0
2618
+ #
2619
+ else:
2620
+ # Preparation for conditional case
2621
+ if v is None:
2622
+ err_msg = f'{fname}: `x` is given (not `None`) but `v` is not given (`None`)'
2623
+ if logger: logger.error(err_msg)
2624
+ raise SrfError(err_msg)
2625
+
2626
+ x = np.asarray(x, dtype='float').reshape(-1, d) # cast in d-dimensional array if needed
2627
+ v = np.asarray(v, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
2628
+ if len(v) != x.shape[0]:
2629
+ err_msg = f'{fname}: length of `v` is not valid'
2630
+ if logger: logger.error(err_msg)
2631
+ raise SrfError(err_msg)
2632
+
2633
+ # Number of conditioning points
2634
+ npt = x.shape[0]
2635
+
2636
+ # Get index in mean_T1 for each conditioning point
2637
+ x_mean_T1_grid_ind = None
2638
+ if mean_T1.size == 1:
2639
+ x_mean_T1_grid_ind = np.zeros(npt, dtype='int')
2640
+ else:
2641
+ indc_f = (x-origin)/spacing
2642
+ indc = indc_f.astype(int)
2643
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
2644
+ if d == 1:
2645
+ x_mean_T1_grid_ind = 1 * indc[:, 0] # multiplying by 1 makes a copy of the array !
2646
+ elif d == 2:
2647
+ x_mean_T1_grid_ind = indc[:, 0] + dimension[0] * indc[:, 1]
2648
+ elif d == 3:
2649
+ x_mean_T1_grid_ind = indc[:, 0] + dimension[0] * (indc[:, 1] + dimension[1] * indc[:, 2])
2650
+
2651
+ # Get index in var_T1 (if not None) for each conditioning point
2652
+ if var_T1 is not None:
2653
+ if var_T1.size == 1:
2654
+ x_var_T1_grid_ind = np.zeros(npt, dtype='int')
2655
+ else:
2656
+ if x_mean_T1_grid_ind is not None:
2657
+ x_var_T1_grid_ind = x_mean_T1_grid_ind
2658
+ else:
2659
+ indc_f = (x-origin)/spacing
2660
+ indc = indc_f.astype(int)
2661
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
2662
+ if d == 1:
2663
+ x_var_T1_grid_ind = 1 * indc[:, 0] # multiplying by 1 makes a copy of the array !
2664
+ elif d == 2:
2665
+ x_var_T1_grid_ind = indc[:, 0] + dimension[0] * indc[:, 1]
2666
+ elif d == 3:
2667
+ x_var_T1_grid_ind = indc[:, 0] + dimension[0] * (indc[:, 1] + dimension[1] * indc[:, 2])
2668
+
2669
+ # Get index in mean_T2 for each conditioning point
2670
+ x_mean_T2_grid_ind = None
2671
+ if mean_T2.size == 1:
2672
+ x_mean_T2_grid_ind = np.zeros(npt, dtype='int')
2673
+ else:
2674
+ indc_f = (x-origin)/spacing
2675
+ indc = indc_f.astype(int)
2676
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
2677
+ if d == 1:
2678
+ x_mean_T2_grid_ind = 1 * indc[:, 0] # multiplying by 1 makes a copy of the array !
2679
+ elif d == 2:
2680
+ x_mean_T2_grid_ind = indc[:, 0] + dimension[0] * indc[:, 1]
2681
+ elif d == 3:
2682
+ x_mean_T2_grid_ind = indc[:, 0] + dimension[0] * (indc[:, 1] + dimension[1] * indc[:, 2])
2683
+
2684
+ # Get index in var_T2 (if not None) for each conditioning point
2685
+ if var_T2 is not None:
2686
+ if var_T2.size == 1:
2687
+ x_var_T2_grid_ind = np.zeros(npt, dtype='int')
2688
+ else:
2689
+ if x_mean_T2_grid_ind is not None:
2690
+ x_var_T2_grid_ind = x_mean_T2_grid_ind
2691
+ else:
2692
+ indc_f = (x-origin)/spacing
2693
+ indc = indc_f.astype(int)
2694
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
2695
+ if d == 1:
2696
+ x_var_T2_grid_ind = 1 * indc[:, 0] # multiplying by 1 makes a copy of the array !
2697
+ elif d == 2:
2698
+ x_var_T2_grid_ind = indc[:, 0] + dimension[0] * indc[:, 1]
2699
+ elif d == 3:
2700
+ x_var_T2_grid_ind = indc[:, 0] + dimension[0] * (indc[:, 1] + dimension[1] * indc[:, 2])
2701
+
2702
+ # Get covariance function for T1, T2 and Y, and their evaluation at 0
2703
+ if cov_model_T1 is not None:
2704
+ cov_func_T1 = cov_model_T1.func() # covariance function
2705
+ cov0_T1 = cov_func_T1(np.zeros(d))
2706
+ if cov_model_T2 is not None:
2707
+ cov_func_T2 = cov_model_T2.func() # covariance function
2708
+ cov0_T2 = cov_func_T2(np.zeros(d))
2709
+ cov_func_Y = cov_model_Y.func() # covariance function
2710
+ cov0_Y = cov_func_Y(np.zeros(2))
2711
+
2712
+ # Set mean_Y
2713
+ if mean_Y is None:
2714
+ mean_Y = np.mean(v)
2715
+
2716
+ if cov_model_T1 is not None:
2717
+ # Set kriging matrix for T1 (mat_T1) of order npt, "over every conditioning point"
2718
+ mat_T1 = np.ones((npt, npt))
2719
+ for i in range(npt-1):
2720
+ # lag between x[i] and x[j], j=i+1, ..., npt-1
2721
+ h = x[(i+1):] - x[i]
2722
+ cov_h_T1 = cov_func_T1(h)
2723
+ mat_T1[i, (i+1):npt] = cov_h_T1
2724
+ mat_T1[(i+1):npt, i] = cov_h_T1
2725
+ mat_T1[i, i] = cov0_T1
2726
+
2727
+ mat_T1[-1,-1] = cov0_T1
2728
+
2729
+ if var_T1 is not None:
2730
+ varUpdate = np.sqrt(var_T1[x_var_T1_grid_ind]/cov0_T1)
2731
+ mat_T1 = varUpdate*(mat_T1.T*varUpdate).T
2732
+
2733
+ if cov_model_T2 is not None:
2734
+ # Set kriging matrix for T2 (mat_T2) of order npt, "over every conditioning point"
2735
+ mat_T2 = np.ones((npt, npt))
2736
+ for i in range(npt-1):
2737
+ # lag between x[i] and x[j], j=i+1, ..., npt-1
2738
+ h = x[(i+1):] - x[i]
2739
+ cov_h_T2 = cov_func_T2(h)
2740
+ mat_T2[i, (i+1):npt] = cov_h_T2
2741
+ mat_T2[(i+1):npt, i] = cov_h_T2
2742
+ mat_T2[i, i] = cov0_T2
2743
+
2744
+ mat_T2[-1,-1] = cov0_T2
2745
+
2746
+ if var_T2 is not None:
2747
+ varUpdate = np.sqrt(var_T2[x_var_T2_grid_ind]/cov0_T2)
2748
+ mat_T2 = varUpdate*(mat_T2.T*varUpdate).T
2749
+
2750
+ # Initialize
2751
+ # - npt_ext: total number of conditioning points for Y, "point (T1(x), T2(x)) + additional constraint t"
2752
+ # - v_T: values of (T1(x), T2(x)) (that are defined later) followed by the values t of the additional constraint
2753
+ # - v_ext: values for Y at "point (T1(x), T2(x)) + additional constraint (t)"
2754
+ # - mat_Y: kriging matrix for Y of order npt_ext, over "point (T1(x), T2(x)) + additional constraint t"
2755
+ if t is None:
2756
+ npt_ext = npt
2757
+ v_T = np.zeros((npt, 2))
2758
+ v_ext = v
2759
+ mat_Y = np.ones((npt_ext, npt_ext))
2760
+ else:
2761
+ npt_ext = npt + len(t)
2762
+ v_T = np.vstack((np.zeros((npt, 2)), t))
2763
+ v_ext = np.hstack((v, yt))
2764
+ mat_Y = np.ones((npt_ext, npt_ext))
2765
+ for i in range(len(t)-1):
2766
+ # lag between t[i] and t[j], j=i+1, ..., len(t)-1
2767
+ h = t[(i+1):] - t[i]
2768
+ cov_h_Y = cov_func_Y(h)
2769
+ k = i + npt
2770
+ mat_Y[k, (k+1):] = cov_h_Y
2771
+ mat_Y[(k+1):, k] = cov_h_Y
2772
+ #mat_Y[k, k] = cov0_Y
2773
+
2774
+ #mat_Y[-1,-1] = cov0_Y
2775
+ for i in range(npt_ext):
2776
+ mat_Y[i, i] = cov0_Y
2777
+ #
2778
+ if npt_ext <= 1:
2779
+ mh_iter = 0 # unnecessary to apply Metropolis update !
2780
+
2781
+ # Set (again if given) default parameter 'mean' and 'var' for T1, T2, and 'mean' for Y
2782
+ if cov_model_T1 is not None:
2783
+ params_T1['mean'] = mean_T1
2784
+ params_T1['var'] = var_T1
2785
+ else:
2786
+ if mean_T1.size == grid_size:
2787
+ params_T1['mean'] = mean_T1.reshape(*dimension[::-1])
2788
+ else:
2789
+ params_T1['mean'] = mean_T1 * np.ones(dimension[::-1])
2790
+ if cov_model_T2 is not None:
2791
+ params_T2['mean'] = mean_T2
2792
+ params_T2['var'] = var_T2
2793
+ else:
2794
+ if mean_T2.size == grid_size:
2795
+ params_T2['mean'] = mean_T2.reshape(*dimension[::-1])
2796
+ else:
2797
+ params_T2['mean'] = mean_T2 * np.ones(dimension[::-1])
2798
+ params_Y['mean'] = mean_Y
2799
+
2800
+ # Set default parameter 'verbose' for params_T1, params_T2 and params_Y
2801
+ if 'verbose' not in params_T1.keys():
2802
+ params_T1['verbose'] = 0
2803
+ # params_T1['verbose'] = verbose
2804
+ if 'verbose' not in params_T2.keys():
2805
+ params_T2['verbose'] = 0
2806
+ # params_T2['verbose'] = verbose
2807
+ if 'verbose' not in params_Y.keys():
2808
+ params_Y['verbose'] = 0
2809
+ # params_Y['verbose'] = verbose
2810
+
2811
+ # Initialization for output
2812
+ Z = []
2813
+ if full_output:
2814
+ T1 = []
2815
+ T2 = []
2816
+ Y = []
2817
+
2818
+ for ireal in range(nreal):
2819
+ # Generate ireal-th realization
2820
+ if verbose > 1:
2821
+ if logger:
2822
+ logger.info(f'{fname}: simulation {ireal+1} of {nreal}...')
2823
+ else:
2824
+ print(f'{fname}: simulation {ireal+1} of {nreal}...')
2825
+ for ntry in range(ntry_max):
2826
+ sim_ok = True
2827
+ Y_cond_aggregation = False
2828
+ if verbose > 2 and ntry > 0:
2829
+ if logger:
2830
+ logger.info(f'{fname}: ... new trial ({ntry+1} of {ntry_max}) for simulation {ireal+1} of {nreal}...')
2831
+ else:
2832
+ print(f'{fname}: ... new trial ({ntry+1} of {ntry_max}) for simulation {ireal+1} of {nreal}...')
2833
+ if x is None:
2834
+ # Unconditional case
2835
+ # ------------------
2836
+ # Generate T1 (one real)
2837
+ if cov_model_T1 is not None:
2838
+ try:
2839
+ sim_T1 = multiGaussian.multiGaussianRun(
2840
+ cov_model_T1, dimension, spacing, origin,
2841
+ mode='simulation', algo=algo_T1, output_mode='array',
2842
+ **params_T1, nreal=1, logger=logger)
2843
+                     except Exception:
2844
+ sim_ok = False
2845
+ if verbose > 2:
2846
+ if logger:
2847
+ logger.info(f'{fname}: ... simulation of T1 failed')
2848
+ else:
2849
+ print(f'{fname}: ... simulation of T1 failed')
2850
+ continue
2851
+ # except Exception as exc:
2852
+ # err_msg = f'{fname}: simulation of T1 failed'
2853
+ # if logger: logger.error(err_msg)
2854
+ # raise SrfError(err_msg) from exc
2855
+
2856
+ else:
2857
+ sim_T1 = params_T1['mean'].reshape(1,*dimension[::-1])
2858
+ # -> sim_T1: nd-array of shape
2859
+ # (1, dimension) (for T1 in 1D)
2860
+ # (1, dimension[1], dimension[0]) (for T1 in 2D)
2861
+ # (1, dimension[2], dimension[1], dimension[0]) (for T1 in 3D)
2862
+
2863
+ # Generate T2 (one real)
2864
+ if cov_model_T2 is not None:
2865
+ try:
2866
+ sim_T2 = multiGaussian.multiGaussianRun(
2867
+ cov_model_T2, dimension, spacing, origin,
2868
+ mode='simulation', algo=algo_T2, output_mode='array',
2869
+ **params_T2, nreal=1, logger=logger)
2870
+                     except Exception:
2871
+ sim_ok = False
2872
+ if verbose > 2:
2873
+ if logger:
2874
+ logger.info(f'{fname}: ... simulation of T2 failed')
2875
+ else:
2876
+ print(f'{fname}: ... simulation of T2 failed')
2877
+ continue
2878
+ # except Exception as exc:
2879
+ # err_msg = f'{fname}: simulation of T2 failed'
2880
+ # if logger: logger.error(err_msg)
2881
+ # raise SrfError(err_msg) from exc
2882
+ else:
2883
+ sim_T2 = params_T2['mean'].reshape(1,*dimension[::-1])
2884
+ # -> sim_T2: nd-array of shape
2885
+ # (1, dimension) (for T2 in 1D)
2886
+ # (1, dimension[1], dimension[0]) (for T2 in 2D)
2887
+ # (1, dimension[2], dimension[1], dimension[0]) (for T2 in 3D)
2888
+
2889
+ # Set origin and dimension for Y
2890
+ origin_Y = [0.0, 0.0]
2891
+ dimension_Y = [0, 0]
2892
+
2893
+ min_T1 = np.min(sim_T1)
2894
+ max_T1 = np.max(sim_T1)
2895
+ if t is not None:
2896
+ min_T1 = min(t[:, 0].min(), min_T1)
2897
+ max_T1 = max(t[:, 0].max(), max_T1)
2898
+ min_T1 = min_T1 - 0.5 * spacing_Y[0]
2899
+ max_T1 = max_T1 + 0.5 * spacing_Y[0]
2900
+ dimension_Y[0] = int(np.ceil((max_T1 - min_T1)/spacing_Y[0]))
2901
+ origin_Y[0] = min_T1 - 0.5*(dimension_Y[0]*spacing_Y[0] - (max_T1 - min_T1))
2902
+
2903
+ min_T2 = np.min(sim_T2)
2904
+ max_T2 = np.max(sim_T2)
2905
+ if t is not None:
2906
+ min_T2 = min(t[:, 1].min(), min_T2)
2907
+ max_T2 = max(t[:, 1].max(), max_T2)
2908
+ min_T2 = min_T2 - 0.5 * spacing_Y[1]
2909
+ max_T2 = max_T2 + 0.5 * spacing_Y[1]
2910
+ dimension_Y[1] = int(np.ceil((max_T2 - min_T2)/spacing_Y[1]))
2911
+ origin_Y[1] = min_T2 - 0.5*(dimension_Y[1]*spacing_Y[1] - (max_T2 - min_T2))
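+                 # The Y grid is sized to cover the simulated range of (T1, T2) (padded by half a cell
+                 # on each side), and the origin is shifted so that the unused slack is split evenly.
+                 # Sketch for one axis (hypothetical values):
+                 #
+                 #     import numpy as np
+                 #     s = 0.001                                  # cell size along this axis
+                 #     tmin, tmax = -2.13, 1.87                   # range to cover (already padded by 0.5*s)
+                 #     n = int(np.ceil((tmax - tmin) / s))        # number of cells
+                 #     o = tmin - 0.5 * (n * s - (tmax - tmin))   # origin: centers the leftover slack
+                 #     # the covered interval [o, o + n*s] then contains [tmin, tmax]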
2912
+
2913
+ # Generate Y conditional to possible additional constraint (t, yt) (one real)
2914
+ try:
2915
+ sim_Y = multiGaussian.multiGaussianRun(
2916
+ cov_model_Y, dimension_Y, spacing_Y, origin_Y, x=t, v=yt,
2917
+ mode='simulation', algo=algo_Y, output_mode='array',
2918
+ **params_Y, nreal=1, logger=logger)
2919
+                 except Exception:
2920
+ sim_ok = False
2921
+ if verbose > 2:
2922
+ if logger:
2923
+ logger.info(f'{fname}: ... simulation of Y failed')
2924
+ else:
2925
+ print(f'{fname}: ... simulation of Y failed')
2926
+ continue
2927
+ # except Exception as exc:
2928
+ # err_msg = f'{fname}: simulation of Y failed'
2929
+ # if logger: logger.error(err_msg)
2930
+ # raise SrfError(err_msg) from exc
2931
+
2932
+ # -> 3d-array of shape (1, dimension_Y[1], dimension_Y[0])
2933
+
2934
+ else:
2935
+ # Conditional case
2936
+ # ----------------
2937
+ # Initialize: unconditional simulation of T1 at x (values in v_T[:,0])
2938
+ ind = np.random.permutation(npt)
2939
+ for j, k in enumerate(ind):
2940
+ if cov_model_T1 is not None:
2941
+ # Simulate value at x[k] (= x[ind[j]]), conditionally to the previous ones
2942
+ # Solve the kriging system (for T1)
2943
+ try:
2944
+ w = np.linalg.solve(
2945
+ mat_T1[ind[:j], :][:, ind[:j]], # kriging matrix
2946
+ mat_T1[ind[:j], ind[j]], # second member
2947
+ )
2948
+                         except Exception:
2949
+ sim_ok = False
2950
+ break
2951
+
2952
+ # Mean (kriged) value at x[k]
2953
+ mu_T1_k = mean_T1[x_mean_T1_grid_ind[k]] + (v_T[ind[:j], 0] - mean_T1[x_mean_T1_grid_ind[ind[:j]]]).dot(w)
2954
+ # Standard deviation (of kriging) at x[k]
2955
+ std_T1_k = np.sqrt(np.maximum(0, cov0_T1 - np.dot(w, mat_T1[ind[:j], ind[j]])))
2956
+ # Draw value in N(mu_T1_k, std_T1_k^2)
2957
+ v_T[k, 0] = np.random.normal(loc=mu_T1_k, scale=std_T1_k)
2958
+ else:
2959
+ v_T[k, 0] = mean_T1[x_mean_T1_grid_ind[k]]
2960
+
2961
+ if not sim_ok:
2962
+ sim_ok = False
2963
+ if verbose > 2:
2964
+ if logger:
2965
+ logger.info(f'{fname}: ... cannot solve kriging system (for T1, initialization)')
2966
+ else:
2967
+ print(f'{fname}: ... cannot solve kriging system (for T1, initialization)')
2968
+ continue
2969
+
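+                 # Each pass of the loop above is one sequential-simulation step: a simple kriging system
+                 # built on the already-simulated points gives a conditional mean and variance, and the new
+                 # value is drawn from that normal distribution. Condensed sketch of a single step
+                 # (all names and numbers below are hypothetical):
+                 #
+                 #     import numpy as np
+                 #     c0 = 1.0                                   # cov(0)
+                 #     C = np.array([[1.0, 0.4], [0.4, 1.0]])     # covariance among already-simulated points
+                 #     c = np.array([0.6, 0.2])                   # covariance between those points and the new one
+                 #     m = np.array([0.1, -0.3])                  # (locally varying) mean at the known points
+                 #     m0 = 0.0                                   # mean at the new point
+                 #     vals = np.array([0.5, -0.8])               # already-simulated values
+                 #     w = np.linalg.solve(C, c)                  # simple kriging weights
+                 #     mu = m0 + w @ (vals - m)                   # kriging mean
+                 #     sd = np.sqrt(max(0.0, c0 - w @ c))         # kriging standard deviation
+                 #     new_val = np.random.normal(loc=mu, scale=sd)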
2970
+ # Initialize: unconditional simulation of T2 at x (values in v_T[:,1])
2971
+ ind = np.random.permutation(npt)
2972
+ for j, k in enumerate(ind):
2973
+ if cov_model_T2 is not None:
2974
+ # Simulate value at x[k] (= x[ind[j]]), conditionally to the previous ones
2975
+ # Solve the kriging system (for T2)
2976
+ try:
2977
+ w = np.linalg.solve(
2978
+ mat_T2[ind[:j], :][:, ind[:j]], # kriging matrix
2979
+ mat_T2[ind[:j], ind[j]], # second member
2980
+ )
2981
+                         except Exception:
2982
+ sim_ok = False
2983
+ break
2984
+
2985
+ # Mean (kriged) value at x[k]
2986
+ mu_T2_k = mean_T2[x_mean_T2_grid_ind[k]] + (v_T[ind[:j], 1] - mean_T2[x_mean_T2_grid_ind[ind[:j]]]).dot(w)
2987
+ # Standard deviation (of kriging) at x[k]
2988
+ std_T2_k = np.sqrt(np.maximum(0, cov0_T2 - np.dot(w, mat_T2[ind[:j], ind[j]])))
2989
+ # Draw value in N(mu_T2_k, std_T2_k^2)
2990
+ v_T[k, 1] = np.random.normal(loc=mu_T2_k, scale=std_T2_k)
2991
+ else:
2992
+ v_T[k, 1] = mean_T2[x_mean_T2_grid_ind[k]]
2993
+
2994
+ if not sim_ok:
2995
+ sim_ok = False
2996
+ if verbose > 2:
2997
+ if logger:
2998
+ logger.info(f'{fname}: ... cannot solve kriging system (for T2, initialization)')
2999
+ else:
3000
+ print(f'{fname}: ... cannot solve kriging system (for T2, initialization)')
3001
+ continue
3002
+
3003
+                 # Update the kriging matrix for Y (mat_Y) according to the values in v_T[0:npt]
3004
+ for i in range(npt-1):
3005
+ # lag between v_T[i] and v_T[j], j=i+1, ..., npt-1
3006
+ h = v_T[(i+1):npt] - v_T[i]
3007
+ cov_h_Y = cov_func_Y(h)
3008
+ mat_Y[i, (i+1):npt] = cov_h_Y
3009
+ mat_Y[(i+1):npt, i] = cov_h_Y
3010
+ # mat_Y[i, i] = cov0_Y
3011
+
3012
+ for i, k in enumerate(range(npt, npt_ext)):
3013
+ # lag between t[i] and v_T[j], j=0, ..., npt-1
3014
+ h = v_T[0:npt] - t[i]
3015
+ cov_h_Y = cov_func_Y(h)
3016
+ mat_Y[k, 0:npt] = cov_h_Y
3017
+ mat_Y[0:npt, k] = cov_h_Y
3018
+ # mat_Y[i, i] = cov0_Y
3019
+
3020
+ # mat_Y[-1,-1] = cov0_Y
3021
+
3022
+                 # Update simulated values v_T at x using the Metropolis-Hastings (MH) algorithm
3023
+ v_T_k_new = np.zeros(2)
3024
+ for nit in range(mh_iter):
3025
+ if verbose > 3:
3026
+ if logger:
3027
+ logger.info(f'{fname}: ... sim {ireal+1} of {nreal}: MH iter {nit+1} of {mh_iter}...')
3028
+ else:
3029
+ print(f'{fname}: ... sim {ireal+1} of {nreal}: MH iter {nit+1} of {mh_iter}...')
3030
+ ind = np.random.permutation(npt)
3031
+ for k in ind:
3032
+ # Sequence of indexes without k
3033
+ indmat = np.hstack((np.arange(k), np.arange(k+1, npt)))
3034
+                         # Simulate a possible new value v_T_k_new at x[k], conditionally to all the other ones
3035
+ #
3036
+ if cov_model_T1 is not None:
3037
+ # Solve the kriging system for T1
3038
+ try:
3039
+ w = np.linalg.solve(
3040
+ mat_T1[indmat, :][:, indmat], # kriging matrix
3041
+ mat_T1[indmat, k], # second member
3042
+ )
3043
+                             except Exception:
3044
+ sim_ok = False
3045
+ if verbose > 2:
3046
+ if logger:
3047
+ logger.info(f'{fname}: ... cannot solve kriging system (for T1)')
3048
+ else:
3049
+ print(f'{fname}: ... cannot solve kriging system (for T1)')
3050
+ break
3051
+ #
3052
+ # Mean (kriged) value at x[k]
3053
+ mu_T1_k = mean_T1[x_mean_T1_grid_ind[k]] + (v_T[indmat, 0] - mean_T1[x_mean_T1_grid_ind[indmat]]).dot(w)
3054
+ # Standard deviation (of kriging) at x[k]
3055
+ std_T1_k = np.sqrt(np.maximum(0, cov0_T1 - np.dot(w, mat_T1[indmat, k])))
3056
+ # Draw value in N(mu, std^2)
3057
+ v_T_k_new[0] = np.random.normal(loc=mu_T1_k, scale=std_T1_k)
3058
+ else:
3059
+ v_T_k_new[0] = mean_T1[x_mean_T1_grid_ind[k]]
3060
+ #
3061
+ # Solve the kriging system for T2
3062
+ if cov_model_T2 is not None:
3063
+ try:
3064
+ w = np.linalg.solve(
3065
+ mat_T2[indmat, :][:, indmat], # kriging matrix
3066
+ mat_T2[indmat, k], # second member
3067
+ )
3068
+                             except Exception:
3069
+ sim_ok = False
3070
+ if verbose > 2:
3071
+ if logger:
3072
+ logger.info(f'{fname}: ... cannot solve kriging system (for T2)')
3073
+ else:
3074
+ print(f'{fname}: ... cannot solve kriging system (for T2)')
3075
+ break
3076
+ #
3077
+ # Mean (kriged) value at x[k]
3078
+ mu_T2_k = mean_T2[x_mean_T2_grid_ind[k]] + (v_T[indmat, 1] - mean_T2[x_mean_T2_grid_ind[indmat]]).dot(w)
3079
+ # Standard deviation (of kriging) at x[k]
3080
+ std_T2_k = np.sqrt(np.maximum(0, cov0_T2 - np.dot(w, mat_T2[indmat, k])))
3081
+ # Draw value in N(mu, std^2)
3082
+ v_T_k_new[1] = np.random.normal(loc=mu_T2_k, scale=std_T2_k)
3083
+ else:
3084
+ v_T_k_new[1] = mean_T2[x_mean_T2_grid_ind[k]]
3085
+ #
3086
+ # Compute MH quotient defined as
3087
+ # prob(Y[v_T_k_new] = v[k] | Y[indmat] = v[indmat], Y[t] = yt) / prob(Y[v_T[k]] = v[k] | Y[indmat] = v[indmat], Y[t] = yt)
3088
+                         # (where Y[t] = yt are the possible additional constraints)
3089
+ #
3090
+                         # New lag from v_T_k_new and corresponding covariance for Y
3091
+ h_k_new = v_T_k_new - np.vstack((v_T[:k], v_T_k_new, v_T[k+1:]))
3092
+ cov_h_Y_k_new = cov_func_Y(h_k_new)
3093
+ # Solve the kriging system for Y for simulation at v_T[k] and at v_T_k_new
3094
+ indmat_ext = np.hstack((indmat, np.arange(npt, npt_ext)))
3095
+ try:
3096
+ w = np.linalg.solve(
3097
+ mat_Y[indmat_ext, :][:, indmat_ext], # kriging matrix
3098
+ np.vstack((mat_Y[indmat_ext, k], cov_h_Y_k_new[indmat_ext])).T # both second members
3099
+ )
3100
+                         except Exception:
3101
+ sim_ok = False
3102
+ if verbose > 2:
3103
+ if logger:
3104
+ logger.info(f'{fname}: ... cannot solve kriging system (for Y)')
3105
+ else:
3106
+ print(f'{fname}: ... cannot solve kriging system (for Y)')
3107
+ break
3108
+ # Mean (kriged) values at v_T[k] and v_T_k_new
3109
+                         mu_Y_k = mean_Y + (v_ext[indmat_ext] - mean_Y).dot(w) # mu_Y_k of shape (2,)
3110
+ # Variance (of kriging) at v_T[k] and v_T_k_new
3111
+ var_Y_k = np.maximum(1.e-20, cov0_Y - np.array([np.dot(w[:,0], mat_Y[indmat_ext, k]), np.dot(w[:,1], cov_h_Y_k_new[indmat_ext])]))
3112
+ # Set minimal variance to 1.e-20 to avoid division by zero
3113
+ #
3114
+ # MH quotient is
3115
+ # phi_{mean=mu_Y_k[1], var=var_Y_k[1]}(v[k]) / phi_{mean=mu_Y_k[0], var=var_Y_k[0]}(v[k])
3116
+ # where phi_{mean, var} is the pdf of the normal law of given mean and var
3117
+ # To avoid overflow in exp, compute log of mh quotient...
3118
+ log_mh_quotient = 0.5 * (np.log(var_Y_k[0]) + (v[k]-mu_Y_k[0])**2/var_Y_k[0] - np.log(var_Y_k[1]) - (v[k]-mu_Y_k[1])**2/var_Y_k[1])
3119
+ if log_mh_quotient >= 0.0 or np.random.random() < np.exp(log_mh_quotient):
3120
+                             # Accept the new value v_T_k_new at x[k]
3121
+ v_T[k] = v_T_k_new
3122
+ # Update kriging matrix for Y
3123
+ mat_Y[k,:] = cov_h_Y_k_new
3124
+ mat_Y[:,k] = cov_h_Y_k_new
3125
+ if not sim_ok:
3126
+ break
3127
+
3128
+ if not sim_ok:
3129
+ continue
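+                 # In the Metropolis-Hastings update above, both candidate locations share the same kriging
+                 # matrix (hence the two second members solved in a single call), and the acceptance test
+                 # compares two Gaussian likelihoods of the same datum v[k] in log space to avoid overflow.
+                 # Sketch of the acceptance rule alone (hypothetical means/variances, index 0 = current
+                 # location, index 1 = proposed location):
+                 #
+                 #     import numpy as np
+                 #     v_k = 0.7                                  # conditioning value at x[k]
+                 #     mu = np.array([0.5, 0.9])                  # kriging means
+                 #     var = np.array([0.2, 0.3])                 # kriging variances
+                 #     log_q = 0.5 * (np.log(var[0]) + (v_k - mu[0])**2 / var[0]
+                 #                    - np.log(var[1]) - (v_k - mu[1])**2 / var[1])
+                 #     accept = log_q >= 0.0 or np.random.random() < np.exp(log_q)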
3130
+
3131
+ # Generate T1 conditional to (x, v_T[0:npt, 0]) (one real)
3132
+ if cov_model_T1 is not None:
3133
+ try:
3134
+ sim_T1 = multiGaussian.multiGaussianRun(
3135
+ cov_model_T1, dimension, spacing, origin, x=x, v=v_T[:npt, 0],
3136
+ mode='simulation', algo=algo_T1, output_mode='array',
3137
+ **params_T1, nreal=1, logger=logger)
3138
+                     except Exception:
3139
+ sim_ok = False
3140
+ if verbose > 2:
3141
+ if logger:
3142
+ logger.info(f'{fname}: ... conditional simulation of T1 failed')
3143
+ else:
3144
+ print(f'{fname}: ... conditional simulation of T1 failed')
3145
+ continue
3146
+ # except Exception as exc:
3147
+ # err_msg = f'{fname}: conditional simulation of T1 failed'
3148
+ # if logger: logger.error(err_msg)
3149
+ # raise SrfError(err_msg) from exc
3150
+ else:
3151
+ sim_T1 = params_T1['mean'].reshape(1,*dimension[::-1])
3152
+ # -> sim_T1: nd-array of shape
3153
+ # (1, dimension) (for T1 in 1D)
3154
+ # (1, dimension[1], dimension[0]) (for T1 in 2D)
3155
+ # (1, dimension[2], dimension[1], dimension[0]) (for T1 in 3D)
3156
+
3157
+ # Generate T2 conditional to (x, v_T[0:npt, 1]) (one real)
3158
+ if cov_model_T2 is not None:
3159
+ try:
3160
+ sim_T2 = multiGaussian.multiGaussianRun(
3161
+ cov_model_T2, dimension, spacing, origin, x=x, v=v_T[:npt, 1],
3162
+ mode='simulation', algo=algo_T2, output_mode='array',
3163
+ **params_T2, nreal=1, logger=logger)
3164
+                     except Exception:
3165
+ sim_ok = False
3166
+ if verbose > 2:
3167
+ if logger:
3168
+ logger.info(f'{fname}: ... conditional simulation of T2 failed')
3169
+ else:
3170
+ print(f'{fname}: ... conditional simulation of T2 failed')
3171
+ continue
3172
+ # except Exception as exc:
3173
+ # err_msg = f'{fname}: conditional simulation of T2 failed'
3174
+ # if logger: logger.error(err_msg)
3175
+ # raise SrfError(err_msg) from exc
3176
+ else:
3177
+ sim_T2 = params_T2['mean'].reshape(1,*dimension[::-1])
3178
+ # -> sim_T2: nd-array of shape
3179
+ # (1, dimension) (for T2 in 1D)
3180
+ # (1, dimension[1], dimension[0]) (for T2 in 2D)
3181
+ # (1, dimension[2], dimension[1], dimension[0]) (for T2 in 3D)
3182
+
3183
+ # Set origin and dimension for Y
3184
+ origin_Y = [0.0, 0.0]
3185
+ dimension_Y = [0, 0]
3186
+
3187
+ min_T1 = np.min(sim_T1)
3188
+ max_T1 = np.max(sim_T1)
3189
+ if t is not None:
3190
+ min_T1 = min(t[:, 0].min(), min_T1)
3191
+ max_T1 = max(t[:, 0].max(), max_T1)
3192
+ min_T1 = min_T1 - 0.5 * spacing_Y[0]
3193
+ max_T1 = max_T1 + 0.5 * spacing_Y[0]
3194
+ dimension_Y[0] = int(np.ceil((max_T1 - min_T1)/spacing_Y[0]))
3195
+ origin_Y[0] = min_T1 - 0.5*(dimension_Y[0]*spacing_Y[0] - (max_T1 - min_T1))
3196
+
3197
+ min_T2 = np.min(sim_T2)
3198
+ max_T2 = np.max(sim_T2)
3199
+ if t is not None:
3200
+ min_T2 = min(t[:, 1].min(), min_T2)
3201
+ max_T2 = max(t[:, 1].max(), max_T2)
3202
+ min_T2 = min_T2 - 0.5 * spacing_Y[1]
3203
+ max_T2 = max_T2 + 0.5 * spacing_Y[1]
3204
+ dimension_Y[1] = int(np.ceil((max_T2 - min_T2)/spacing_Y[1]))
3205
+ origin_Y[1] = min_T2 - 0.5*(dimension_Y[1]*spacing_Y[1] - (max_T2 - min_T2))
3206
+
3207
+ # Compute
3208
+                 #    indc: grid node index (single flattened index) of each conditioning point (nearest node),
3209
+                 #          rounded to the lower index if exactly between two grid nodes and the index is positive
3210
+ indc_f = (v_T-origin_Y)/spacing_Y
3211
+ indc = indc_f.astype(int)
3212
+ indc = indc - 1 * np.all((indc == indc_f, indc > 0), axis=0)
3213
+                 indc = indc[:, 0] + indc[:, 1] * dimension_Y[0] # single-indices
3214
+
3215
+ indc_unique, indc_inv = np.unique(indc, return_inverse=True)
3216
+ if len(indc_unique) == len(indc):
3217
+ v_T_unique = v_T
3218
+ v_ext_unique = v_ext
3219
+ else:
3220
+ Y_cond_aggregation = True
3221
+                     v_T_unique = np.array([v_T[indc_inv==j].mean(axis=0) for j in range(len(indc_unique))])
3222
+ v_ext_unique = np.array([v_ext[indc_inv==j].mean() for j in range(len(indc_unique))])
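+                 # Conditioning points for Y that fall in the same grid cell are merged by averaging;
+                 # np.unique with return_inverse=True gives, for each point, the index of its cell group.
+                 # Sketch on hypothetical flat cell indices and values:
+                 #
+                 #     import numpy as np
+                 #     cell = np.array([4, 7, 4, 9])              # flat cell index of each conditioning point
+                 #     vals = np.array([1.0, 2.0, 3.0, 4.0])      # corresponding conditioning values
+                 #     cell_u, inv = np.unique(cell, return_inverse=True)
+                 #     vals_u = np.array([vals[inv == j].mean() for j in range(len(cell_u))])
+                 #     # -> cell_u = [4, 7, 9], vals_u = [2.0, 3.0, 4.0]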
3223
+
3224
+ # Generate Y conditional to (v_T, v_ext) (one real)
3225
+ try:
3226
+ sim_Y = multiGaussian.multiGaussianRun(
3227
+ cov_model_Y, dimension_Y, spacing_Y, origin_Y, x=v_T_unique, v=v_ext_unique,
3228
+ mode='simulation', algo=algo_Y, output_mode='array',
3229
+ **params_Y, nreal=1, logger=logger)
3230
+                 except Exception:
3231
+ sim_ok = False
3232
+ if verbose > 2:
3233
+ if logger:
3234
+ logger.info(f'{fname}: ... conditional simulation of Y failed')
3235
+ else:
3236
+ print(f'{fname}: ... conditional simulation of Y failed')
3237
+ continue
3238
+ # except Exception as exc:
3239
+ # err_msg = f'{fname}: conditional simulation of Y failed'
3240
+ # if logger: logger.error(err_msg)
3241
+ # raise SrfError(err_msg) from exc
3242
+
3243
+ # -> 3d-array of shape (1, dimension_Y[1], dimension_Y[0])
3244
+
3245
+ # Generate Z (one real)
3246
+ # Compute
3247
+ # ind1, ind2: node index (nearest node),
3248
+ # rounded to lower index if between two grid nodes and index is positive
3249
+ ind_f = (sim_T1.reshape(-1) - origin_Y[0])/spacing_Y[0]
3250
+ ind1 = ind_f.astype(int)
3251
+ ind1 = ind1 - 1 * np.all((ind1 == ind_f, ind1 > 0), axis=0)
3252
+ ind_f = (sim_T2.reshape(-1) - origin_Y[1])/spacing_Y[1]
3253
+ ind2 = ind_f.astype(int)
3254
+ ind2 = ind2 - 1 * np.all((ind2 == ind_f, ind2 > 0), axis=0)
3255
+ Z_real = np.array([sim_Y[0, jj, ii] for ii, jj in zip(ind1, ind2)])
3256
+ #Z_real = np.array([sim_Y[0, j, i] for i, j in zip(np.floor((sim_T1.reshape(-1) - origin_Y[0])/spacing_Y[0]).astype(int), np.floor((sim_T2.reshape(-1) - origin_Y[1])/spacing_Y[1]).astype(int))])
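+             # The substitution itself is a lookup: for every grid node x, Z(x) is the value of the
+             # simulated Y field in the cell containing (T1(x), T2(x)). Sketch on a tiny hypothetical
+             # Y grid (ignoring the tie-break applied above when a value falls exactly on a cell boundary):
+             #
+             #     import numpy as np
+             #     oy, sy = (0.0, 0.0), (0.5, 0.5)            # origin and spacing of the Y grid
+             #     Y_grid = np.arange(12.0).reshape(3, 4)     # Y values, shape (n2, n1)
+             #     t1 = np.array([0.1, 1.9])                  # T1(x) at two grid nodes
+             #     t2 = np.array([0.3, 1.2])                  # T2(x) at the same nodes
+             #     i1 = ((t1 - oy[0]) / sy[0]).astype(int)
+             #     i2 = ((t2 - oy[1]) / sy[1]).astype(int)
+             #     z = Y_grid[i2, i1]                         # Z at the two nodes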
3257
+ if vmin is not None and Z_real.min() < vmin:
3258
+ sim_ok = False
3259
+ if verbose > 2:
3260
+ if logger:
3261
+ logger.info(f'{fname}: ... specified minimal value not honoured')
3262
+ else:
3263
+ print(f'{fname}: ... specified minimal value not honoured')
3264
+ continue
3265
+ if vmax is not None and Z_real.max() > vmax:
3266
+ sim_ok = False
3267
+ if verbose > 2:
3268
+ if logger:
3269
+ logger.info(f'{fname}: ... specified maximal value not honoured')
3270
+ else:
3271
+ print(f'{fname}: ... specified maximal value not honoured')
3272
+ continue
3273
+
3274
+ if sim_ok:
3275
+ if Y_cond_aggregation and verbose > 0:
3276
+ if logger:
3277
+                         logger.warning(f'{fname}: conditioning points for Y falling in the same grid cell have been aggregated (mean) (real index {ireal})')
3278
+ else:
3279
+                         print(f'{fname}: WARNING: conditioning points for Y falling in the same grid cell have been aggregated (mean) (real index {ireal})')
3280
+ Z.append(Z_real)
3281
+ if full_output:
3282
+ T1.append(sim_T1[0])
3283
+ T2.append(sim_T2[0])
3284
+ Y.append([dimension_Y, spacing_Y, origin_Y, sim_Y.reshape(dimension_Y[::-1])])
3285
+ break
3286
+
3287
+ # Get Z
3288
+ if verbose > 0 and len(Z) < nreal:
3289
+ if logger:
3290
+             logger.warning(f'{fname}: some realizations failed (missing)')
3291
+ else:
3292
+             print(f'{fname}: WARNING: some realizations failed (missing)')
3293
+
3294
+ Z = np.asarray(Z).reshape(len(Z), *np.atleast_1d(dimension)[::-1])
3295
+
3296
+ if full_output:
3297
+ T1 = np.asarray(T1).reshape(len(T1), *np.atleast_1d(dimension)[::-1])
3298
+ T2 = np.asarray(T2).reshape(len(T2), *np.atleast_1d(dimension)[::-1])
3299
+ return Z, T1, T2, Y
3300
+ else:
3301
+ return Z
3302
+ # ----------------------------------------------------------------------------
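+ # Note on the full output: each entry of the returned list Y stores the grid geometry of that
+ # realization's coding field, so its cell-center coordinates can be rebuilt as in the sketch
+ # below (y_k is a hypothetical name for one entry of Y):
+ #
+ #     import numpy as np
+ #     dim, sp, o, vals = y_k                     # one entry of the returned list Y
+ #     t1_centers = o[0] + (np.arange(dim[0]) + 0.5) * sp[0]
+ #     t2_centers = o[1] + (np.arange(dim[1]) + 0.5) * sp[1]
+ #     # vals has shape (dim[1], dim[0]); vals[j, i] is Y at (t1_centers[i], t2_centers[j])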
3303
+
3304
+ # # =============================================================================
3305
+ # # Function to plot details of a SRF
3306
+ # # =============================================================================
3307
+
3308
+ # import matplotlib.pyplot as plt
3309
+ # from matplotlib.gridspec import GridSpec
3310
+ # from matplotlib.markers import MarkerStyle
3311
+
3312
+ # from geone import imgplot as imgplt
3313
+
3314
+ # # ----------------------------------------------------------------------------
3315
+ # def plot_srf1D_details(im_Z, im_T, Y=None,
3316
+ # x=None, v=None, t=None, yt=None,
3317
+ # im_Z_display=None, im_T_display=None,
3318
+ # plot_dens_Z=True, plot_dens_T=True,
3319
+ # quant_Z=None, quant_T=None,
3320
+ # col_stat_Z='green', col_stat_T='orange',
3321
+ # col_x_in_im_Z='red', marker_x_in_im_Z='x', markersize_x_in_im_Z=75,
3322
+ # col_x_in_im_T='red', marker_x_in_im_T='x', markersize_x_in_im_T=75,
3323
+ # col_x_in_Y='red', marker_x_in_Y='x', markersize_x_in_Y=75,
3324
+ # col_t_in_Y='purple', marker_t_in_Y='.', markersize_t_in_Y=100,
3325
+ # ireal=0):
3326
+ # """
3327
+ # Displays (in the current figure) the details of one realization of a 1D SRF.
3328
+
3329
+ #     The following three plots are displayed:
3330
+
3331
+ # - result for Z (resulting SRF)
3332
+ # - result for T (latent field, directing function)
3333
+ # - result for Y (coding process), and some statistics
3334
+
3335
+ #     Note: if `Y` is not given (`None`), then only the first two plots are displayed.
3336
+
3337
+ # Z and T fields are displayed as 2D maps, with one cell along y-axis, and the
3338
+ # function :func:`plot_srf2D_details` is used with the same parameters except:
3339
+
3340
+ # **Parameters (differing)**
3341
+ # --------------------------
3342
+ # x : 1d-array of floats, or float, optional
3343
+ # data points locations (float coordinates)
3344
+ # """
3345
+ # if x is not None:
3346
+ # x = np.vstack((x, (im_Z.oy + 0.5*im_Z.sy) * np.ones_like(x))).T
3347
+
3348
+ # plot_srf2D_details(im_Z, im_T, Y=Y,
3349
+ # x=x, v=v, t=t, yt=yt,
3350
+ # im_Z_display=im_Z_display, im_T_display=im_T_display,
3351
+ # plot_dens_Z=plot_dens_Z, plot_dens_T=plot_dens_T,
3352
+ # quant_Z=quant_Z, quant_T=quant_T,
3353
+ # col_stat_Z=col_stat_Z, col_stat_T=col_stat_T,
3354
+ # col_x_in_im_Z=col_x_in_im_Z, marker_x_in_im_Z=marker_x_in_im_Z, markersize_x_in_im_Z=markersize_x_in_im_Z,
3355
+ # col_x_in_im_T=col_x_in_im_T, marker_x_in_im_T=marker_x_in_im_T, markersize_x_in_im_T=markersize_x_in_im_T,
3356
+ # col_x_in_Y=col_x_in_Y, marker_x_in_Y=marker_x_in_Y, markersize_x_in_Y=markersize_x_in_Y,
3357
+ # col_t_in_Y=col_t_in_Y, marker_t_in_Y=marker_t_in_Y, markersize_t_in_Y=markersize_t_in_Y,
3358
+ # ireal=ireal)
3359
+ # # ----------------------------------------------------------------------------
3360
+
3361
+ # # ----------------------------------------------------------------------------
3362
+ # def plot_srf2D_details(im_Z, im_T, Y=None,
3363
+ # x=None, v=None,
3364
+ # t=None, yt=None,
3365
+ # im_Z_display=None, im_T_display=None,
3366
+ # plot_dens_Z=True, plot_dens_T=True,
3367
+ # quant_Z=None, quant_T=None,
3368
+ # col_stat_Z='green', col_stat_T='orange',
3369
+ # col_x_in_im_Z='red', marker_x_in_im_Z='x', markersize_x_in_im_Z=75,
3370
+ # col_x_in_im_T='red', marker_x_in_im_T='x', markersize_x_in_im_T=75,
3371
+ # col_x_in_Y='red', marker_x_in_Y='x', markersize_x_in_Y=75,
3372
+ # col_t_in_Y='purple', marker_t_in_Y='.', markersize_t_in_Y=100,
3373
+ # ireal=0):
3374
+ # """
3375
+ #     Displays (in the current figure) the details of one realization of a 2D SRF.
3376
+
3377
+ # The following plots are displayed:
3378
+
3379
+ # - result for Z (resulting SRF)
3380
+ # - result for T (latent field, directing function)
3381
+ # - result for Y (coding process), and some statistics
3382
+ #     Note: if `Y` is not given (`None`), then only the first two plots are displayed.
3383
+ # Note: if `Y` is not given (`None`), then only the two first plots are displayed.
3384
+
3385
+ # Parameters
3386
+ # ----------
3387
+ # im_Z : :class:`geone.img.Img`
3388
+ # image containing the realizations of Z (resulting SRF),
3389
+ # each variable is one realization
3390
+
3391
+ # im_T : :class:`geone.img.Img`
3392
+ # image containing the realizations of T (latent field, directing function),
3393
+ # each variable is one realization
3394
+
3395
+ # Y : list, optional
3396
+ # list containing the realizations of Y (coding process), `Y[k]` is a list of
3397
+ #         length 4 for the k-th realization of `Y`, with:
3398
+
3399
+ #         - Y[k][0]: int, Y_nt (number of cells along t-axis)
3400
+ # - Y[k][1]: float, Y_st (cell size along t-axis)
3401
+ # - Y[k][2]: float, Y_ot (origin)
3402
+ # - Y[k][3]: 1d-array of shape (Y_nt,), values of Y[k]
3403
+
3404
+ # x : array of floats, optional
3405
+ # data points locations (float coordinates);
3406
+ # 2d-array of floats of two columns, each row being the location
3407
+ # of one conditioning point;
3408
+ # note: if only one point, a 1d-array of 2 floats is accepted
3409
+
3410
+ # v : 1d-array-like of floats, optional
3411
+ # data values at `x` (`v[i]` is the data value at `x[i]`)
3412
+
3413
+ # t : 1d-array-like of floats, or float, optional
3414
+ # values of T considered as conditioning point for Y(T) (additional constraint)
3415
+
3416
+ # yt : 1d-array-like of floats, or float, optional
3417
+ # value of Y at the conditioning point `t` (same length as `t`)
3418
+
3419
+ # im_Z_display : dict, optional
3420
+ # additional parameters for displaying im_Z (on 1st plot),
3421
+ # passed to the function :func:`geone.imgplot.drawImage2D`
3422
+
3423
+ # im_T_display : dict, optional
3424
+ # additional parameters for displaying im_T (on 2nd plot)
3425
+ # passed to the function :func:`geone.imgplot.drawImage2D`
3426
+
3427
+ # plot_dens_Z : bool, default: True
3428
+ # indicates if density of Z is displayed (on 3rd plot)
3429
+
3430
+ # plot_dens_T : bool, default: True
3431
+ # indicates if density of T is displayed (on 3rd plot)
3432
+
3433
+ # quant_Z: 1d-array of floats or float, optional
3434
+ #         probability values in [0, 1] for quantiles of Z to be displayed
3435
+ # (on 3rd plot), e.g.
3436
+ # `numpy.array([0., 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 1.])`
3437
+
3438
+ # quant_T: 1d-array of floats or float, optional
3439
+ # probability values in [0, 1] for quantiles of T to be displayed
3440
+ # (on 3rd plot), e.g.
3441
+ # `numpy.array([0., 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 1.])`
3442
+
3443
+ # col_stat_Z : color, default: 'green'
3444
+ # color (3-tuple (RGB code), 4-tuple (RGBA code) or str), used for
3445
+ # displaying statistics about Z (density, quantiles) (on 3rd plot)
3446
+
3447
+ # col_stat_T : color, default: 'orange'
3448
+ # color (3-tuple (RGB code), 4-tuple (RGBA code) or str), used for
3449
+ # displaying statistics about T (density, quantiles) (on 3rd plot)
3450
+
3451
+ # col_x_in_im_Z : color, default: 'red'
3452
+ # color (3-tuple (RGB code), 4-tuple (RGBA code) or str), used for
3453
+ #         plotting x locations in map of Z (on 1st plot)
3454
+
3455
+ # marker_x_in_im_Z : marker, default: 'x'
3456
+ #         marker used for plotting x locations in map of Z (on 1st plot)
3457
+
3458
+ # markersize_x_in_im_Z : int, default: 75
3459
+ #         marker size used for plotting x locations in map of Z (on 1st plot)
3460
+
3461
+ # col_x_in_im_T : color, default: 'red'
3462
+ # color (3-tuple (RGB code), 4-tuple (RGBA code) or str), used for
3463
+ # plotting x locations in map of T (on 2nd plot)
3464
+
3465
+ # marker_x_in_im_T : marker, default: 'x'
3466
+ # marker used for plotting x location in map of T (on 2nd plot)
3467
+
3468
+ # markersize_x_in_im_T : int, default: 75
3469
+ # marker size used for plotting x location in map of T (on 2nd plot)
3470
+
3471
+ # col_x_in_Y : color, default: 'red'
3482
+ # color (3-tuple (RGB code), 4-tuple (RGBA code) or str), used for
3483
+ # plotting T(x) locations in map of Y (on 3rd plot)
3484
+
3485
+ #     marker_x_in_Y : marker, default: 'x'
3486
+ # marker used for T(x) locations in map of Y (on 3rd plot)
3487
+
3488
+ #     markersize_x_in_Y : int, default: 75
3489
+ # marker size used for T(x) locations in map of Y (on 3rd plot)
3490
+
3491
+ # col_t_in_Y : color, default: 'purple'
3492
+ # color (3-tuple (RGB code), 4-tuple (RGBA code) or str), used for
3493
+ # plotting t locations in map of Y (on 3rd plot)
3494
+
3495
+ #     marker_t_in_Y : marker, default: '.'
3496
+ # marker used for t locations in map of Y (on 3rd plot)
3497
+
3498
+ #     markersize_t_in_Y : int, default: 100
3499
+ # marker size used for t locations in map of Y (on 3rd plot)
3500
+
3501
+ # ireal : int, default: 0
3502
+ # index of the realization to be displayed
3503
+ # """
3504
+ # # Initialize dictionary im_Z_display
3505
+ # if im_Z_display is None:
3506
+ # im_Z_display = {}
3507
+
3508
+ # # Initialize dictionary im_T_display
3509
+ # if im_T_display is None:
3510
+ # im_T_display = {}
3511
+
3512
+ # # Prepare figure layout
3513
+ # fig = plt.gcf()
3514
+ # fig.set_constrained_layout(True)
3515
+ # #fig = plt.figure(figsize=figsize, constrained_layout=True)
3516
+
3517
+ # nr = 2 + int(Y is not None)
3518
+ # gs = GridSpec(nr, 4, figure=fig)
3519
+ # ax1 = fig.add_subplot(gs[0:2, 0:2]) # ax for T
3520
+ # ax2 = fig.add_subplot(gs[0:2, 2:4]) # ax for Z
3521
+
3522
+ # if x is not None:
3523
+ # x = np.asarray(x, dtype='float').reshape(-1, 2) # cast in 2-dimensional array if needed
3524
+ # v = np.asarray(v, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
3525
+
3526
+ # plt.sca(ax1)
3527
+ # imgplt.drawImage2D(im_T, iv=ireal, **im_T_display)
3528
+ # if 'title' not in im_T_display.keys():
3529
+ # plt.title('Realization of T (#{})'.format(ireal))
3530
+ # if x is not None:
3531
+ # #plt.plot(x[:,0], x[:,1], ls='', color=col_x_in_im_T, marker=marker_x_in_im_T, markersize=markersize_x_in_im_T)
3532
+ # if not isinstance(col_x_in_im_T, list):
3533
+ # col_x_in_im_T = [col_x_in_im_T]
3534
+ # if not isinstance(marker_x_in_im_T, list):
3535
+ # marker_x_in_im_T = [marker_x_in_im_T]
3536
+ # if not isinstance(markersize_x_in_im_T, list):
3537
+ # markersize_x_in_im_T = [markersize_x_in_im_T]
3538
+ # for k in range(x.shape[0]):
3539
+ # marker = marker_x_in_im_T[k%len(marker_x_in_im_T)]
3540
+ # col = col_x_in_im_T[k%len(col_x_in_im_T)]
3541
+ # markersize = markersize_x_in_im_T[k%len(markersize_x_in_im_T)]
3542
+ # if MarkerStyle(marker).is_filled():
3543
+ # color={'c':'none', 'edgecolor':col}
3544
+ # else:
3545
+ # color={'c':col}
3546
+ # plt.scatter(x[k,0], x[k,1], marker=marker, s=markersize, **color)
3547
+
3548
+ # plt.sca(ax2)
3549
+ # imgplt.drawImage2D(im_Z, iv=ireal, **im_Z_display)#, yticklabels=[])#yaxis=False)
3550
+ # if 'title' not in im_Z_display.keys():
3551
+ # plt.title('Realization of Z (#{})'.format(ireal))
3552
+
3553
+ # if x is not None:
3554
+ # #plt.plot(x[:,0], x[:,1], ls='', color=col_x_in_im_Z, marker=marker_x_in_im_Z, markersize=markersize_x_in_im_Z)
3555
+ # if not isinstance(col_x_in_im_Z, list):
3556
+ # col_x_in_im_Z = [col_x_in_im_Z]
3557
+ # if not isinstance(marker_x_in_im_Z, list):
3558
+ # marker_x_in_im_Z = [marker_x_in_im_Z]
3559
+ # if not isinstance(markersize_x_in_im_Z, list):
3560
+ # markersize_x_in_im_Z = [markersize_x_in_im_Z]
3561
+ # for k in range(x.shape[0]):
3562
+ # marker = marker_x_in_im_Z[k%len(marker_x_in_im_Z)]
3563
+ # col = col_x_in_im_Z[k%len(col_x_in_im_Z)]
3564
+ # markersize = markersize_x_in_im_Z[k%len(markersize_x_in_im_Z)]
3565
+ # if MarkerStyle(marker).is_filled():
3566
+ # color={'c':'none', 'edgecolor':col}
3567
+ # else:
3568
+ # color={'c':col}
3569
+ # plt.scatter(x[k,0], x[k,1], marker=marker, s=markersize, **color)
3570
+
3571
+ # if Y is not None:
3572
+ # ax3 = fig.add_subplot(gs[2, :])
3573
+ # plt.sca(ax3)
3574
+
3575
+ # Y_nx = Y[ireal][0]
3576
+ # Y_sx = Y[ireal][1]
3577
+ # Y_ox = Y[ireal][2]
3578
+ # Y_val = Y[ireal][3]
3579
+ # y_abscissa = Y_ox + (np.arange(Y_nx)+0.5)*Y_sx
3580
+ # plt.plot(y_abscissa, Y_val)
3581
+
3582
+ # if x is not None:
3583
+ # jx = (x[:,0]-im_T.ox)/im_T.sx
3584
+ # jy = (x[:,1]-im_T.oy)/im_T.sy
3585
+ # jz = np.zeros(x.shape[0]) # (x[:,2]-im_T.oz)/im_T.sz
3586
+
3587
+ # ix = [int(a) for a in jx]
3588
+ # iy = [int(a) for a in jy]
3589
+ # iz = [int(a) for a in jz]
3590
+
3591
+ #         # round to lower index if between two grid nodes
3592
+ # ix = [a-1 if a == b and a > 0 else a for a, b in zip(ix, jx)]
3593
+ # iy = [a-1 if a == b and a > 0 else a for a, b in zip(iy, jy)]
3594
+ # iz = [a-1 if a == b and a > 0 else a for a, b in zip(iz, jz)]
3595
+ # # plt.plot([im_T.val[ireal, izz, iyy, ixx] for ixx, iyy, izz in zip(ix, iy, iz)],
3596
+ # # [im_Z.val[ireal, izz, iyy, ixx] for ixx, iyy, izz in zip(ix, iy, iz)],
3597
+ # # ls='', color=col_x_in_Y, marker=marker_x_in_Y, markersize=markersize_x_in_Y)
3598
+ # if not isinstance(col_x_in_Y, list):
3599
+ # col_x_in_Y = [col_x_in_Y]
3600
+ # if not isinstance(marker_x_in_Y, list):
3601
+ # marker_x_in_Y = [marker_x_in_Y]
3602
+ # if not isinstance(markersize_x_in_Y, list):
3603
+ # markersize_x_in_Y = [markersize_x_in_Y]
3604
+ # for k, (ixx, iyy, izz) in enumerate(zip(ix, iy, iz)):
3605
+ # marker = marker_x_in_Y[k%len(marker_x_in_Y)]
3606
+ # col = col_x_in_Y[k%len(col_x_in_Y)]
3607
+ # markersize = markersize_x_in_Y[k%len(markersize_x_in_Y)]
3608
+ # if MarkerStyle(marker).is_filled():
3609
+ # color={'c':'none', 'edgecolor':col}
3610
+ # else:
3611
+ # color={'c':col}
3612
+ # # plt.scatter(im_T.val[ireal, izz, iyy, ixx], im_Z.val[ireal, izz, iyy, ixx], marker=marker, s=markersize, **color)
3613
+ # plt.scatter(im_T.val[ireal, izz, iyy, ixx], v[k], marker=marker, s=markersize, **color)
3614
+
3615
+ # if t is not None:
3616
+ # #plt.plot(t, interp1d(y_abscissa, Y[ireal][2])(t), ls='', color=col_t_in_Y, marker=marker_t_in_Y, markersize=markersize_t_in_Y)
3617
+ # #plt.scatter(t, interp1d(y_abscissa, Y[ireal][2])(t), marker=marker_t_in_Y, s=markersize_t_in_Y, c=col_t_in_Y)
3618
+ # plt.scatter(t, yt, marker=marker_t_in_Y, s=markersize_t_in_Y, c=col_t_in_Y)
3619
+
3620
+ # plt.title('Y(t)')
3621
+ # plt.grid()
3622
+
3623
+ # if quant_T is not None:
3624
+ # quant_T = np.atleast_1d(quant_T)
3625
+ # tq = np.quantile(im_T.val[ireal], quant_T)
3626
+ # ypos = plt.ylim()[0] + 0.05*np.diff(plt.ylim())
3627
+ # for xx, p in zip(tq, quant_T):
3628
+ # plt.axvline(xx, c=col_stat_T, ls='dashed', alpha=0.5)
3629
+ # plt.text(xx, ypos, 'p={}'.format(p), ha='center', va='bottom',
3630
+ # bbox={'facecolor':col_stat_T, 'alpha':0.2})
3631
+
3632
+ # if quant_Z is not None:
3633
+ # quant_Z = np.atleast_1d(quant_Z)
3634
+ # zq = np.quantile(im_Z.val[ireal], quant_Z)
3635
+ # xpos = plt.xlim()[0] + 0.05*np.diff(plt.xlim())
3636
+ # for yy, p in zip(zq, quant_Z):
3637
+ # plt.axhline(yy, c=col_stat_Z, ls='dashed', alpha=0.5)
3638
+ # plt.text(xpos, yy, 'p={}'.format(p), ha='center', va='bottom',
3639
+ # bbox={'facecolor':col_stat_Z, 'alpha':0.2})
3640
+
3641
+ # if plot_dens_T:
3642
+ # ax3b = ax3.twinx() # instantiate a second axes that shares the same x-axis
3643
+ # ax3b.set_ylabel('T density', color=col_stat_T) # we already handled the x-label with ax3
3644
+ # ax3b.tick_params(axis='y', labelcolor=col_stat_T)
3645
+ # plt.sca(ax3b)
3646
+ # plt.hist(im_T.val[ireal].reshape(-1), density=True, bins=40, color=col_stat_T, alpha=0.2)
3647
+ # tt = np.linspace(y_abscissa.min(), y_abscissa.max(), 100)
3648
+ # plt.plot(tt, stats.gaussian_kde(im_T.val[ireal].reshape(-1))(tt), color=col_stat_T, ls='dashed', alpha=0.5)
3649
+
3650
+ # if plot_dens_Z:
3651
+ # ax3c = ax3.twiny() # instantiate a second axes that shares the same y-axis
3652
+ # ax3c.set_xlabel('Z density', color=col_stat_Z) # we already handled the y-label with ax3
3653
+ # ax3c.tick_params(axis='x', labelcolor=col_stat_Z)
3654
+ # plt.sca(ax3c)
3655
+ # plt.hist(im_Z.val[ireal].reshape(-1), density=True, bins=40, color=col_stat_Z, alpha=0.2, orientation='horizontal')
3656
+ # z = np.linspace(im_Z.val[ireal].min(), im_Z.val[ireal].max(), 100)
3657
+ # plt.plot(stats.gaussian_kde(im_Z.val[ireal].reshape(-1))(z), z, color=col_stat_Z, ls='dashed', alpha=0.5)
3658
+
3659
+ # plt.sca(ax3)
3660
+ # #plt.show()
3661
+ # # ----------------------------------------------------------------------------
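+ # The commented plotting code above overlays marginal densities on the Y(t) panel with twin axes;
+ # the core matplotlib pattern, reduced to a minimal sketch (hypothetical data):
+ #
+ #     import numpy as np
+ #     import matplotlib.pyplot as plt
+ #     rng = np.random.default_rng(0)
+ #     samples = rng.normal(size=1000)                # values living on the shared x-axis
+ #     xx = np.linspace(samples.min(), samples.max(), 200)
+ #     fig, ax = plt.subplots()
+ #     ax.plot(xx, np.sin(xx))                        # main curve drawn against the value axis
+ #     axb = ax.twinx()                               # second y-axis sharing the same x-axis
+ #     axb.hist(samples, bins=40, density=True, alpha=0.2, color='orange')
+ #     axb.set_ylabel('density', color='orange')
+ #     plt.show()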