turbx-1.0.2-py3-none-any.whl

turbx/rgd_xpln_coh.py ADDED
@@ -0,0 +1,992 @@
1
+ import os
2
+ import re
3
+ import sys
4
+ import timeit
5
+ from concurrent.futures import ThreadPoolExecutor
6
+ from pathlib import Path, PurePosixPath
7
+
8
+ import h5py
9
+ import numpy as np
10
+ import psutil
11
+ from mpi4py import MPI
12
+ from scipy.signal import csd
13
+ from tqdm import tqdm
14
+
15
+ from .gradient import gradient
16
+ from .h5 import h5_print_contents
17
+ from .signal import ccor
18
+ from .utils import even_print, format_time_string
19
+
20
+ # ======================================================================
21
+
22
+ def _calc_wall_coh_xpln(self, **kwargs):
23
+ '''
24
+ Calculate coherence & complex cross-spectrum between turbulent
25
+ field and wall (uτ & τuy) in [t] at every [y]
26
+ ----------------------------------------------------------------
27
+ - Designed for analyzing unsteady, thin planes in [x]
28
+ - Multithreaded with ThreadPoolExecutor()
29
+ - scipy.signal.csd() can itself run multithreaded via the underlying FFT/BLAS
30
+ - set OMP_NUM_THREADS=1 and pass 'n_threads' as a kwarg manually
31
+ '''
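+ ## Illustrative sketch (assumes an OpenMP-backed BLAS; not executed here):
+ ## pinning must happen before numpy/scipy are first imported, e.g. in the
+ ## launcher script:
+ #
+ # import os
+ # os.environ['OMP_NUM_THREADS'] = '1' ## set before 'import numpy'
+ # os.environ['MKL_NUM_THREADS'] = '1'
+ # import numpy, scipy ## BLAS/FFT now single-threaded per rank;
+ # ## parallelism then comes only from ThreadPoolExecutor(n_threads)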
32
+
33
+ if (self.rank==0):
34
+ verbose = True
35
+ else:
36
+ verbose = False
37
+
38
+ if verbose: print('\n'+'rgd.calc_wall_coh_xpln()'+'\n'+72*'-')
39
+ t_start_func = timeit.default_timer()
40
+
41
+ ## assert that the opened RGD has fsubtype 'unsteady' (i.e. is NOT a prime file)
42
+ if (self.fsubtype!='unsteady'):
43
+ raise ValueError(f"fsubtype must be 'unsteady' but got '{self.fsubtype}'")
44
+ if not self.usingmpi:
45
+ raise NotImplementedError('function is not implemented for non-MPI usage')
46
+
47
+ h5py_is_mpi_build = h5py.h5.get_config().mpi
48
+ if not h5py_is_mpi_build:
49
+ if verbose: print('h5py was not compiled for parallel usage! exiting.')
50
+ sys.exit(1)
51
+
52
+ rx = kwargs.get('rx',1)
53
+ ry = kwargs.get('ry',1)
54
+ rz = kwargs.get('rz',1)
55
+ rt = kwargs.get('rt',1)
56
+
57
+ acc = kwargs.get('acc',6)
58
+ edge_stencil = kwargs.get('edge_stencil','full')
59
+
60
+ sy = kwargs.get('sy',1) ## N [y] layers to read at a time
61
+ if not isinstance(sy,int) or (sy<1):
62
+ raise ValueError('sy should be a positive non-zero int')
63
+
64
+ n_threads = kwargs.get('n_threads',1)
65
+
66
+ ## Debug Rank:Proc Affinity
67
+ #pp = psutil.Process()
68
+ #print(f"[Rank {self.rank}] sees CPUs: {pp.cpu_affinity()} | n_threads={n_threads} | OMP_NUM_THREADS={os.environ.get('OMP_NUM_THREADS')}")
69
+
70
+ #try:
71
+ # n_threads = int(os.environ.get('OMP_NUM_THREADS'))
72
+ #except TypeError: ## not set
73
+ # n_threads = os.cpu_count()
74
+
75
+ fn_h5_out = kwargs.get('fn_h5_out',None) ## Filename for output HDF5 (.h5) file
76
+ overlap_fac_nom = kwargs.get('overlap_fac_nom',0.50) ## Nominal windows overlap factor
77
+ n_win = kwargs.get('n_win',8) ## N segment windows for [t] PSD calc
78
+
79
+ #overlap_fac_nom = kwargs.get('overlap_fac_nom',0.5)
80
+ #n_win = kwargs.get('n_win',8)
81
+
82
+ ## Only distribute data across [y]
83
+ if (rx!=1):
84
+ raise AssertionError('rx!=1')
85
+ if (rz!=1):
86
+ raise AssertionError('rz!=1')
87
+ if (rt!=1):
88
+ raise AssertionError('rt!=1')
89
+
90
+ if not isinstance(ry,int) or (ry<1):
91
+ raise ValueError('ry should be a positive non-zero int')
92
+
93
+ ## Check the choice of ranks per dimension
94
+ if (rx*ry*rz*rt != self.n_ranks):
95
+ raise AssertionError('rx*ry*rz*rt != self.n_ranks')
96
+ if (rx>self.nx):
97
+ raise AssertionError('rx>self.nx')
98
+ if (ry>self.ny):
99
+ raise AssertionError('ry>self.ny')
100
+ if (rz>self.nz):
101
+ raise AssertionError('rz>self.nz')
102
+ if (rt>self.nt):
103
+ raise AssertionError('rt>self.nt')
104
+
105
+ if (self.ny%ry!=0):
106
+ raise ValueError('ny not divisible by ry')
107
+
108
+ ## Distribute 4D data over ranks --> here only in [y]
109
+ ryl_ = np.array_split(np.arange(self.ny,dtype=np.int64),min(ry,self.ny))
110
+ ryl = [[b[0],b[-1]+1] for b in ryl_ ]
111
+ ry1,ry2 = ryl[self.rank]
112
+ nyr = ry2 - ry1
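+ ## Worked example of the decomposition above: for ny=8, ry=4,
+ ## np.array_split(np.arange(8),4) --> [0,1],[2,3],[4,5],[6,7],
+ ## so ryl=[[0,2],[2,4],[4,6],[6,8]] and rank 2 owns y-indices 4,5 (nyr=2).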
113
+
114
+ ## Check all [y] ranges have same size
115
+ for ryl_ in ryl:
116
+ if not (ryl_[1]-ryl_[0]==nyr):
117
+ raise ValueError('[y] chunks are not even in size')
118
+
119
+ if (nyr%sy!=0):
120
+ raise ValueError('nyr not divisible by sy')
121
+
122
+ ## Output filename : HDF5 (.h5)
123
+ if (fn_h5_out is None): ## automatically determine name
124
+ fname_path = os.path.dirname(self.fname)
125
+ fname_base = os.path.basename(self.fname)
126
+ fname_root, fname_ext = os.path.splitext(fname_base)
127
+ fname_root = re.findall(r'io\S+_mpi_[0-9]+', fname_root)[0]
128
+ fn_h5_out_base = fname_root+'_coh.h5'
129
+ fn_h5_out = str(PurePosixPath(fname_path, fn_h5_out_base))
130
+ if (Path(fn_h5_out).suffix != '.h5'):
131
+ raise ValueError(f"fn_h5_out='{str(fn_h5_out)}' must end in .h5")
132
+ if os.path.isfile(fn_h5_out):
134
+ #if (os.path.getsize(fn_h5_out) > 8*1024**3):
135
+ # raise ValueError(f"fn_h5_out='{str(fn_h5_out)}' exists and is >8 [GB]. exiting for your own safety.")
+ pass ## size guard is commented out; 'pass' keeps the 'if' body syntactically valid
136
+ if (fn_h5_out == self.fname):
137
+ raise ValueError(f"fn_h5_out='{str(fn_h5_out)}' cannot be same as input filename.")
137
+
138
+ if verbose: even_print( 'fn_h5' , self.fname )
139
+ if verbose: even_print( 'fn_h5_out' , fn_h5_out )
140
+ if verbose: print(72*'-')
141
+ self.comm.Barrier()
142
+
143
+ ## The data dictionary to be written to .h5 later
144
+ data = {}
145
+
146
+ ## Infile
147
+ fsize = os.path.getsize(self.fname)/1024**3
148
+ if verbose: even_print(os.path.basename(self.fname),'%0.1f [GB]'%fsize)
149
+ if verbose: even_print('nx',f'{self.nx:d}')
150
+ if verbose: even_print('ny',f'{self.ny:d}')
151
+ if verbose: even_print('nz',f'{self.nz:d}')
152
+ if verbose: even_print('nt',f'{self.nt:d}')
153
+ if verbose: even_print('ngp',f'{self.ngp/1e6:0.1f} [M]')
154
+ #if verbose: even_print('cy',f'{cy:d}')
155
+ if verbose: even_print('sy',f'{sy:d}')
156
+ if verbose: even_print('n_ranks',f'{self.n_ranks:d}')
157
+ if verbose: even_print('n_threads',f'{n_threads:d}')
158
+ if verbose: print(72*'-')
159
+
160
+ ## 0D freestream scalars
161
+ lchar = self.lchar ; data['lchar'] = lchar
162
+ U_inf = self.U_inf ; data['U_inf'] = U_inf
163
+ rho_inf = self.rho_inf ; data['rho_inf'] = rho_inf
164
+ T_inf = self.T_inf ; data['T_inf'] = T_inf
165
+
166
+ #data['M_inf'] = self.M_inf
167
+ data['Ma'] = self.Ma
168
+ data['Pr'] = self.Pr
169
+
170
+ ## Read in 1D coordinate arrays & re-dimensionalize
171
+ x = np.copy( self['dims/x'][()] * self.lchar )
172
+ y = np.copy( self['dims/y'][()] * self.lchar )
173
+ z = np.copy( self['dims/z'][()] * self.lchar )
174
+ t = np.copy( self['dims/t'][()] * self.tchar )
175
+
176
+ nx = self.nx ; data['nx'] = nx
177
+ ny = self.ny ; data['ny'] = ny
178
+ nz = self.nz ; data['nz'] = nz
179
+ nt = self.nt ; data['nt'] = nt
180
+
181
+ ## Assert constant Δz
182
+ dz0 = np.diff(z)[0]
183
+ if not np.all(np.isclose(np.diff(z), dz0, rtol=1e-6)):
184
+ raise NotImplementedError('Δz not constant')
185
+ dz = dz0
186
+
187
+ ## dimensional [s]
188
+ dt = self.dt * self.tchar
189
+ np.testing.assert_allclose(dt, t[1]-t[0], rtol=1e-12, atol=1e-12)
190
+
191
+ t_meas = self.duration * self.tchar
192
+ np.testing.assert_allclose(t_meas, t.max()-t.min(), rtol=1e-12, atol=1e-12)
193
+
194
+ zrange = z.max() - z.min()
195
+
196
+ data['x'] = x
197
+ data['y'] = y
198
+ data['z'] = z
199
+
200
+ data['t'] = t
201
+ data['t_meas'] = t_meas
202
+ data['dt'] = dt
203
+ data['dz'] = dz
204
+ data['zrange'] = zrange
205
+
206
+ if verbose: even_print( 'Δt/tchar' , f'{dt/self.tchar:0.8f}' )
207
+ if verbose: even_print( 'Δt' , f'{dt:0.3e} [s]' )
208
+ if verbose: even_print( 'duration/tchar' , f'{self.duration:0.1f}' )
209
+ if verbose: even_print( 'duration' , f'{self.duration*self.tchar:0.3e} [s]' )
210
+ if verbose: print(72*'-')
211
+
212
+ ## report
213
+ if verbose:
214
+ even_print('Δt' , f'{dt :0.5e} [s]' )
215
+ even_print('t_meas' , f'{t_meas:0.5e} [s]' )
216
+ even_print('Δz' , f'{dz0 :0.5e} [m]' )
217
+ even_print('zrange' , f'{zrange:0.5e} [m]' )
218
+ print(72*'-')
219
+
220
+ ## Establish [t] windowing & get frequency
221
+ nperseg = nt // n_win
222
+ noverlap = int(round(nperseg*overlap_fac_nom))
223
+ overlap_fac = noverlap / nperseg
224
+ fs = 1./dt ## dimensional [1/s]
225
+
226
+ ## Get [freq] vector
227
+ freq_w0,_ = csd(
228
+ np.zeros((nt,),dtype=np.float64),
229
+ np.zeros((nt,),dtype=np.float64),
230
+ fs=fs,
231
+ nperseg=nperseg,
232
+ noverlap=noverlap,
233
+ window='hann',
234
+ detrend=False,
235
+ scaling='density',
236
+ return_onesided=True,
237
+ )
238
+ fp = np.where(freq_w0>0) ## don't include 0 freq
239
+ freq = np.copy(freq_w0[fp])
240
+ nf = freq.shape[0]
241
+ df = np.diff(freq)[0]
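+ ## The zero-array csd() call above only serves to recover the one-sided
+ ## frequency grid; a minimal sketch of the (assumed-equivalent) direct route:
+ # freq_check = np.fft.rfftfreq(nperseg, d=dt) ## [0, fs/nperseg, ..., fs/2]
+ # assert np.allclose(freq_w0, freq_check)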
242
+
243
+ data['nperseg'] = nperseg
244
+ data['noverlap'] = noverlap
245
+ data['freq'] = freq
246
+ data['df'] = df
247
+ data['nf'] = nf
248
+
249
+ if verbose:
250
+ even_print('overlap_fac (nominal)' , f'{overlap_fac_nom:0.5f}' )
251
+ even_print('n_win' , f'{n_win:d}' )
252
+ even_print('nperseg' , f'{nperseg:d}' )
253
+ even_print('noverlap' , f'{noverlap:d}' )
254
+ even_print('overlap_fac' , f'{overlap_fac:0.5f}' )
255
+ print(72*'-')
256
+
257
+ if verbose:
258
+ even_print('freq min',f'{freq.min():0.1f} [Hz]')
259
+ even_print('freq max',f'{freq.max():0.1f} [Hz]')
260
+ even_print('df',f'{df:0.1f} [Hz]')
261
+ even_print('nf',f'{nf:d}')
262
+ print(72*'-')
263
+
264
+ ## ## [z] Wavenumber (kz) vector -- scipy fftfreq version
265
+ ## kz_full = sp.fft.fftfreq(n=nz, d=dz0) * ( 2 * np.pi )
266
+ ## kzp = np.where(kz_full>0) ## don't include k=0 or (-) k
267
+ ## kz = np.copy(kz_full[kzp])
268
+ ## dkz = kz[1] - kz[0]
269
+ ## nkz = kz.shape[0]
270
+
271
+ ## [z] Wavenumber (kz) vector
272
+ kz_ov_2pi,_ = csd(
273
+ np.zeros((nz,),dtype=np.float64),
274
+ np.zeros((nz,),dtype=np.float64),
275
+ fs=1/dz0,
276
+ nperseg=nz,
277
+ noverlap=0,
278
+ window='boxcar',
279
+ detrend=False,
280
+ scaling='density',
281
+ return_onesided=True,
282
+ )
283
+ kz_full = kz_ov_2pi * (2 * np.pi)
284
+ kzp = np.where(kz_full>0) ## don't include k=0
285
+ kz = np.copy(kz_full[kzp])
286
+ dkz = kz[1] - kz[0]
287
+ nkz = kz.shape[0]
288
+
289
+ data['kz'] = kz
290
+ data['dkz'] = dkz
291
+ data['nkz'] = nkz
292
+
293
+ if verbose:
294
+ even_print('kz min',f'{kz.min():0.1f} [1/m]')
295
+ even_print('kz max',f'{kz.max():0.1f} [1/m]')
296
+ even_print('dkz',f'{dkz:0.1f} [1/m]')
297
+ even_print('nkz',f'{nkz:d}')
298
+ print(72*'-')
299
+
300
+ ## Wavelength λz = (2·π)/kz
301
+ lz = np.copy( 2 * np.pi / kz )
302
+ data['lz'] = lz
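+ ## Note: the one-sided kz grid ends at the Nyquist wavenumber π/Δz (even nz),
+ ## so the smallest resolvable wavelength is lz.min() == 2*dz0.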
303
+
304
+ # ===
305
+
306
+ ## Get lags [t]
307
+ lags_t,_ = ccor( np.ones(nt,dtype=np.float32) , np.ones(nt,dtype=np.float32), get_lags=True )
308
+ n_lags_t_ = nt*2-1
309
+ n_lags_t = lags_t.shape[0]
310
+ if (n_lags_t!=n_lags_t_):
311
+ raise AssertionError('check lags [t]')
312
+
313
+ data['lags_t'] = lags_t
314
+ data['n_lags_t'] = n_lags_t
315
+
316
+ if verbose:
317
+ even_print('n lags (Δt)' , '%i'%(n_lags_t,))
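+ ## Sketch of an equivalent lag vector via scipy (assuming ccor() computes a
+ ## 'full'-mode cross-correlation, consistent with the 2*nt-1 check above):
+ # from scipy.signal import correlation_lags
+ # lags_check = correlation_lags(nt, nt, mode='full') ## integer lags -(nt-1)..(nt-1)
+ # assert lags_check.shape[0] == 2*nt - 1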
318
+
319
+ # ===
320
+
321
+ ## cross-correlation pairs
322
+ ## [ str:var1, str:var2, bool:do_density_weighting]
323
+ ccor_combis = [
324
+
325
+ [ 'utau' , 'u' , True ], ## [ uτ′ , ρ·u″ ]
326
+ [ 'utau' , 'v' , True ], ## [ uτ′ , ρ·v″ ]
327
+ [ 'utau' , 'u' , False ], ## [ uτ′ , u′ ]
328
+ [ 'utau' , 'v' , False ], ## [ uτ′ , v′ ]
329
+ [ 'utau' , 'p' , False ], ## [ uτ′ , p′ ]
330
+ [ 'utau' , 'T' , False ], ## [ uτ′ , T′ ]
331
+
332
+ [ 'tauuy' , 'u' , True ], ## [ τuy′ , ρ·u″ ]
333
+ [ 'tauuy' , 'v' , True ], ## [ τuy′ , ρ·v″ ]
334
+ [ 'tauuy' , 'u' , False ], ## [ τuy′ , u′ ]
335
+ [ 'tauuy' , 'v' , False ], ## [ τuy′ , v′ ]
336
+ [ 'tauuy' , 'p' , False ], ## [ τuy′ , p′ ]
337
+ [ 'tauuy' , 'T' , False ], ## [ τuy′ , T′ ]
338
+
339
+ ]
340
+
341
+ ## Generate cross-correlation scalar names
342
+ scalars = []
343
+ for ccor_combi in ccor_combis:
344
+ s1,s2,do_density_weighting = ccor_combi
345
+ if do_density_weighting:
346
+ scalars.append(f'{s1}I_r{s2}II')
347
+ else:
348
+ scalars.append(f'{s1}I_{s2}I')
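+ ## e.g. the naming scheme yields 'utauI_ruII' for ['utau','u',True] (uτ′ vs ρ·u″)
+ ## and 'utauI_uI' for ['utau','u',False] (uτ′ vs u′).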
349
+
350
+ ## Generate avg scalar names
351
+ scalars_Re_avg = []
352
+ scalars_Fv_avg = []
353
+ for ccor_combi in ccor_combis:
354
+ s1,s2,do_density_weighting = ccor_combi
355
+ if do_density_weighting and ('rho' not in scalars_Re_avg):
356
+ scalars_Re_avg.append('rho')
357
+ if do_density_weighting:
358
+ #if (s1 not in scalars_Fv_avg):
359
+ # scalars_Fv_avg.append(s1)
360
+ if (s2 not in scalars_Fv_avg):
361
+ scalars_Fv_avg.append(s2)
362
+ else:
363
+ #if (s1 not in scalars_Re_avg):
364
+ # scalars_Re_avg.append(s1)
365
+ if (s2 not in scalars_Re_avg):
366
+ scalars_Re_avg.append(s2)
367
+
368
+ ## numpy formatted arrays: buffers for PSD & other data (rank-local)
369
+ Rt = np.zeros(shape=(nyr, n_lags_t ) , dtype={'names':scalars , 'formats':[np.dtype(np.float64) for s in scalars]})
370
+ Coh_t = np.zeros(shape=(nyr, nf ) , dtype={'names':scalars , 'formats':[np.dtype(np.complex128) for s in scalars]})
371
+ Coh_z = np.zeros(shape=(nyr, nkz ) , dtype={'names':scalars , 'formats':[np.dtype(np.complex128) for s in scalars]})
372
+ Pt = np.zeros(shape=(nyr, nf ) , dtype={'names':scalars , 'formats':[np.dtype(np.complex128) for s in scalars]})
373
+ Pz = np.zeros(shape=(nyr, nkz ) , dtype={'names':scalars , 'formats':[np.dtype(np.complex128) for s in scalars]})
374
+ covariance = np.zeros(shape=(nyr, ) , dtype={'names':scalars , 'formats':[np.dtype(np.float64) for s in scalars]})
375
+ avg_Re = np.zeros(shape=(nyr, ) , dtype={'names':scalars_Re_avg , 'formats':[np.dtype(np.float64) for s in scalars_Re_avg]})
376
+ avg_Fv = np.zeros(shape=(nyr, ) , dtype={'names':scalars_Fv_avg , 'formats':[np.dtype(np.float64) for s in scalars_Fv_avg]})
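+ ## These are numpy structured arrays: one named field per ccor pair, so a
+ ## single buffer row is addressed e.g. as (field names as generated above):
+ # Rt['utauI_uI'][0,:] ## [uτ′,u′] lag-correlation at this rank's first y-index
+ # Coh_t['utauI_ruII'] ## (nyr,nf) coherence buffer for [uτ′,ρ·u″]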
377
+
378
+ if verbose:
379
+ even_print('n cross-correlation scalar combinations' , f'{len(ccor_combis):d}')
380
+ print(72*'-')
381
+
382
+ # ==============================================================
383
+ # Calculate instantaneous uτ & τuy
384
+ # ==============================================================
385
+
386
+ if verbose:
387
+ print('>>> calculating uτ & τuy')
388
+ self.comm.Barrier()
389
+ t_start = timeit.default_timer()
390
+
391
+ if (self.rank==0):
392
+
393
+ u = np.zeros(shape=(nx,7,nz,nt), dtype=np.float64)
394
+ T_wall = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
395
+ rho_wall = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
396
+
397
+ dset = self['data/u']
398
+ #with dset.collective:
399
+ u[:,:,:,:] = dset[:,:,:7,:].T
400
+ dset = self['data/T']
401
+ #with dset.collective:
402
+ T_wall[:,:,:,:] = dset[:,:,0,:].T[:,np.newaxis,:,:]
403
+ dset = self['data/rho']
404
+ #with dset.collective:
405
+ rho_wall[:,:,:,:] = dset[:,:,0,:].T[:,np.newaxis,:,:]
406
+
407
+ ## Re-dimensionalize
408
+ u *= self.U_inf
409
+ T_wall *= self.T_inf
410
+ rho_wall *= self.rho_inf
411
+
412
+ mu_wall = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
413
+ mu_wall[:,:,:,:] = self.mu_Suth_ref * ( T_wall / self.T_Suth_ref )**(3/2) * ( ( self.T_Suth_ref + self.S_Suth ) / ( T_wall + self.S_Suth ) )
414
+
415
+ ddy_u = np.zeros(shape=(nx,7,nz,nt), dtype=np.float64)
416
+ ddy_u[:,:,:,:] = gradient(u, y[:7], axis=1, acc=acc, edge_stencil=edge_stencil, d=1)
417
+
418
+ ddy_u_wall = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
419
+ ddy_u_wall[:,:,:,:] = ddy_u[:,0,:,:][:,np.newaxis,:,:]
420
+
421
+ ddy_u = None ; del ddy_u
422
+ u = None ; del u
423
+
424
+ ## INSTANTANEOUS τw
425
+ tau_uy = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
426
+ tau_uy[:,:,:,:] = mu_wall[:,:,:,:] * ddy_u_wall[:,:,:,:]
427
+
428
+ ## INSTANTANEOUS uτ
429
+ u_tau = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
430
+ u_tau[:,:,:,:] = np.sign(tau_uy) * np.sqrt( np.abs(tau_uy) / rho_wall )
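+ ## In formula form: τ_w = μ_w·(∂u/∂y)|wall and u_τ = sign(τ_w)·sqrt(|τ_w|/ρ_w);
+ ## the sign()/abs() construction keeps u_τ real (and sign-preserving) where
+ ## the instantaneous wall shear reverses.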
431
+
432
+ mu_wall = None ; del mu_wall
433
+ T_wall = None ; del T_wall
434
+ rho_wall = None ; del rho_wall
435
+
436
+ if ( u_tau.shape != (nx,1,nz,nt) ) or ( tau_uy.shape != (nx,1,nz,nt) ):
437
+ print(f'rank {self.rank:d}: shape violation')
438
+ self.comm.Abort(1)
439
+
440
+ u_tau_avg = np.mean(u_tau , axis=3, dtype=np.float64, keepdims=True) ## (x,1,z,1)
441
+ tau_uy_avg = np.mean(tau_uy , axis=3, dtype=np.float64, keepdims=True) ## (x,1,z,1)
442
+
443
+ if ( u_tau_avg.shape != (nx,1,nz,1) ) or ( tau_uy_avg.shape != (nx,1,nz,1) ):
444
+ print(f'rank {self.rank:d}: shape violation')
445
+ self.comm.Abort(1)
446
+
447
+ u_tau_avg = None ; del u_tau_avg
448
+ tau_uy_avg = None ; del tau_uy_avg
449
+
450
+ # ==============================================================
451
+
452
+ self.comm.Barrier()
453
+ t_delta = timeit.default_timer() - t_start
454
+ if verbose:
455
+ even_print('calculate uτ & τuy',format_time_string(t_delta))
456
+ print(72*'-')
457
+
458
+ ## Initialize buffers on non-0 ranks
459
+ if self.rank!=0:
460
+ tau_uy = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
461
+ u_tau = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
462
+
463
+ ## Broadcast from rank 0 to all ranks
464
+ self.comm.Barrier()
465
+ t_start = timeit.default_timer()
466
+
467
+ self.comm.Bcast( tau_uy , root=0 )
468
+ self.comm.Bcast( u_tau , root=0 )
469
+
470
+ self.comm.Barrier()
471
+ t_delta = timeit.default_timer() - t_start
472
+
473
+ if verbose:
474
+ even_print('Bcast uτ & τuy',format_time_string(t_delta))
475
+ print(72*'-')
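+ ## mpi4py's capital-B Bcast() fills a pre-allocated buffer in place, hence the
+ ## zero-initialized arrays on the non-root ranks above. Minimal sketch (with
+ ## 'comm' any MPI communicator):
+ # buf = np.arange(10.) if comm.Get_rank()==0 else np.zeros(10)
+ # comm.Bcast(buf, root=0) ## afterwards every rank holds 0..9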
476
+
477
+ # ==============================================================
478
+ # Check memory
479
+ # ==============================================================
480
+
481
+ hostname = MPI.Get_processor_name()
482
+ mem_free_gb = psutil.virtual_memory().free / 1024**3
483
+ G = self.comm.gather([ self.rank , hostname , mem_free_gb ], root=0)
484
+ G = self.comm.bcast(G, root=0)
485
+
486
+ host_mem = {}
487
+ for rank, host, mem in G:
488
+ if host not in host_mem or mem < host_mem[host]:
489
+ host_mem[host] = mem
490
+ total_free = sum(host_mem.values())
491
+
492
+ if verbose:
493
+ #print(72*'-')
494
+ for key,value in host_mem.items():
495
+ even_print(f'RAM free {key}', f'{int(np.floor(value)):d} [GB]')
496
+ even_print('RAM free (local,min)', f'{int(np.floor(min(host_mem.values()))):d} [GB]')
497
+ even_print('RAM free (global)', f'{int(np.floor(total_free)):d} [GB]')
498
+
499
+ shape_read = (nx,sy,nz,nt) ## local
500
+ if verbose: even_print('read shape (local)', f'[{nx:d},{sy:d},{nz:d},{nt:d}]')
501
+ data_gb = np.dtype(np.float64).itemsize * np.prod(shape_read) / 1024**3
502
+ if verbose: even_print('read size (global)', f'{int(np.ceil(data_gb*ry)):d} [GB]')
503
+
504
+ if verbose: even_print('read size (global) ×6', f'{int(np.ceil(data_gb*ry*6)):d} [GB]')
505
+ ram_usage_est = data_gb*ry*6/total_free
506
+ if verbose: even_print('RAM usage estimate', f'{100*ram_usage_est:0.1f} [%]')
507
+
508
+ self.comm.Barrier()
509
+ if (ram_usage_est>0.80):
510
+ print('RAM consumption might be too high. exiting.')
511
+ self.comm.Abort(1)
512
+
513
+ # ==============================================================
514
+ # Main loop
515
+ # ==============================================================
516
+
517
+ if verbose:
518
+ progress_bar = tqdm(
519
+ #total=len(ccor_combis)*cy,
520
+ total=len(ccor_combis)*(nyr//sy),
521
+ ncols=100,
522
+ desc='Coh',
523
+ leave=True,
524
+ file=sys.stdout,
525
+ mininterval=0.1,
526
+ smoothing=0.,
527
+ #bar_format="\033[B{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}\033[A\n\b",
528
+ bar_format="{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}",
529
+ ascii="░█",
530
+ colour='#FF6600',
531
+ )
532
+
533
+ for cci,cc in enumerate(ccor_combis): ## ccor pairs
534
+
535
+ if verbose: tqdm.write(72*'-')
536
+
537
+ scalar_L, scalar_R, do_density_weighting = cc
538
+
539
+ if scalar_L == 'utau':
540
+ scalar_L_str = 'uτ'
541
+ elif scalar_L == 'tauuy':
542
+ scalar_L_str = 'τuy'
543
+ else:
544
+ raise RuntimeError(f"unknown scalar_L '{scalar_L}'")
545
+
546
+ if do_density_weighting:
547
+ msg = f'[{scalar_L_str}′,ρ·{scalar_R}″]'
548
+ else:
549
+ msg = f'[{scalar_L_str}′,{scalar_R}′]'
550
+ if verbose:
551
+ tqdm.write(even_print('computing',msg,s=True,))
552
+
553
+ #dset_L = self[f'data/{scalar_L}']
554
+ dset_R = self[f'data/{scalar_R}']
555
+ dset_rho = self['data/rho']
556
+
557
+ scalar = scalars[cci]
558
+
559
+ ## Assert scalar name
560
+ if do_density_weighting:
561
+ if (f'{scalar_L}I_r{scalar_R}II' != scalar ):
562
+ raise ValueError(f"scalar name mismatch: expected '{scalar_L}I_r{scalar_R}II', got '{scalar}'")
563
+ else:
564
+ if (f'{scalar_L}I_{scalar_R}I' != scalar ):
565
+ raise ValueError(f"scalar name mismatch: expected '{scalar_L}I_{scalar_R}I', got '{scalar}'")
566
+
567
+ # ## [y] loop outer (chunks within rank)
568
+ # for cyl_ in cyl:
569
+ # cy1, cy2 = cyl_
570
+ # nyc = cy2 - cy1
571
+
572
+ for ci in range(nyr//sy): ## [y] loop
573
+
574
+ cy1 = ry1 + ci*sy
575
+ cy2 = cy1 + sy
576
+ nyc = cy2 - cy1
577
+
578
+ ## COPY data L (no read!)
579
+ if scalar_L == 'utau':
580
+ data_L = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
581
+ data_L[:,:,:,:] = u_tau[:,:,:,:]
582
+ elif scalar_L == 'tauuy':
583
+ data_L = np.zeros(shape=(nx,1,nz,nt), dtype=np.float64)
584
+ data_L[:,:,:,:] = tau_uy[:,:,:,:]
585
+ else:
586
+ raise RuntimeError(f"unknown scalar_L '{scalar_L}'")
587
+
588
+ self.comm.Barrier()
589
+ t_start = timeit.default_timer()
590
+
591
+ ## Read data R
592
+ scalar_str = scalar_R
593
+ n_scalars_read = 1
594
+ with dset_R.collective:
595
+ data_R = np.copy( dset_R[:,:,cy1:cy2,:].T ).astype(np.float64)
596
+
597
+ ## Read ρ
598
+ if do_density_weighting:
599
+ n_scalars_read += 1
600
+ scalar_str += ',ρ'
601
+ with dset_rho.collective:
602
+ rho = np.copy( dset_rho[:,:,cy1:cy2,:].T ).astype(np.float64)
603
+ else:
604
+ rho = None
605
+
606
+ self.comm.Barrier()
607
+ t_delta = timeit.default_timer() - t_start
608
+ data_gb = n_scalars_read * ( self.nx * ry * (cy2-cy1) * self.nz * self.nt * dset_R.dtype.itemsize ) / 1024**3
609
+ if verbose:
610
+ tqdm.write(even_print(f'read: {scalar_str}', '%0.3f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb,t_delta,(data_gb/t_delta)), s=True))
611
+
612
+ ## Assert shapes
613
+ if ( data_L.shape != (nx,1,nz,nt) ):
614
+ print(f'rank {self.rank:d}: shape violation')
615
+ self.comm.Abort(1)
616
+ if ( data_R.shape != (nx,nyc,nz,nt) ):
617
+ print(f'rank {self.rank:d}: shape violation')
618
+ self.comm.Abort(1)
619
+ if (rho is not None) and ( rho.shape != (nx,nyc,nz,nt) ):
620
+ print(f'rank {self.rank:d}: shape violation')
621
+ self.comm.Abort(1)
622
+
623
+ # === Redimensionalize
624
+
625
+ if scalar_R in ['u','v','w',]:
626
+ data_R *= U_inf
627
+ elif scalar_R in ['p',]:
628
+ data_R *= rho_inf*U_inf**2
629
+ elif scalar_R in ['T',]:
630
+ data_R *= T_inf
631
+ else:
632
+ raise ValueError(f"no re-dimensionalization rule for scalar '{scalar_R}'")
633
+
634
+ if (rho is not None): ## i.e. if do_density_weighting
635
+ rho *= rho_inf
636
+
637
+ # === Compute mean-removed data
638
+
639
+ ## avg(□) or avg(ρ·□)/avg(ρ) in [t]
640
+ if do_density_weighting:
641
+ rho_avg = np.mean( rho , axis=3, dtype=np.float64, keepdims=True) ## [x,y,z,1]
642
+ data_R_avg = np.mean( rho*data_R , axis=3, dtype=np.float64, keepdims=True) ## [x,y,z,1]
643
+ data_R_avg /= rho_avg
644
+ else:
645
+ data_R_avg = np.mean( data_R , axis=3, dtype=np.float64, keepdims=True) ## [x,y,z,1]
646
+
647
+ ### pointer to data L
648
+ #if scalar_L == 'utau':
649
+ # data_L_avg = u_tau_avg
650
+ #elif scalar_L == 'tauuy':
651
+ # data_L_avg = tau_uy_avg
652
+ #else:
653
+ # raise RuntimeError
654
+
655
+ data_L_avg = np.mean( data_L , axis=3, dtype=np.float64, keepdims=True) ## (x,y,z,1)
656
+
657
+ ## Reynolds prime □′ or Favre prime □″
658
+ data_L -= data_L_avg
659
+ data_R -= data_R_avg
660
+
661
+ ## Assert stationarity / definition averaging
662
+ ## avg(□′)==0 or avg(ρ·□″)==0
663
+ if do_density_weighting:
664
+ b_ = np.mean(rho*data_R, axis=3, dtype=np.float64, keepdims=True)
665
+ else:
666
+ b_ = np.mean(data_R, axis=3, dtype=np.float64, keepdims=True)
667
+ if not np.allclose( b_, np.zeros_like(b_), atol=1e-6 ):
668
+ print(f'rank {self.rank:d}: avg(□′)!=0 or avg(ρ·□″)!=0')
669
+ self.comm.Abort(1)
670
+
671
+ ## LEFT variable (uτ′ or τuy′) ... never gets ρ-weighted / Favre mean-removed
672
+ a_ = np.mean(data_L, axis=3, dtype=np.float64, keepdims=True)
673
+ if not np.allclose( a_, np.zeros_like(a_), atol=1e-6 ):
674
+ print(f'rank {self.rank:d}: avg(□′)!=0 for left (wall) variable')
675
+ self.comm.Abort(1)
676
+
677
+ a_ = None ; del a_
678
+ b_ = None ; del b_
679
+
680
+ ## Covariance (L here is never ρ-weighted)
681
+ if do_density_weighting:
682
+ covariance_ = np.mean( data_L * rho*data_R , axis=3 , dtype=np.float64, keepdims=True)
683
+ else:
684
+ covariance_ = np.mean( data_L * data_R , axis=3 , dtype=np.float64, keepdims=True)
685
+
686
+ ## Write this chunk/scalar's covariance to covariance buffer
687
+ ## avg over [x,z] : [x,y,z,1] --> [y]
688
+ yiA = cy1 - ry1
689
+ yiB = cy2 - ry1
690
+ covariance[scalar][yiA:yiB] = np.squeeze( np.mean( covariance_ , axis=(0,2,3) , dtype=np.float64) )
691
+
692
+ ## Write (rank-local) 1D [y] averages to buffers
693
+ if do_density_weighting:
694
+ avg_Fv[scalar_R][yiA:yiB] = np.squeeze( np.mean( data_R_avg , axis=(0,2,3) , dtype=np.float64) )
695
+ avg_Re['rho'][yiA:yiB] = np.squeeze( np.mean( rho_avg , axis=(0,2,3) , dtype=np.float64) )
696
+ else:
697
+ avg_Re[scalar_R][yiA:yiB] = np.squeeze( np.mean( data_R_avg , axis=(0,2,3) , dtype=np.float64) )
698
+
699
+ # ===============================================================================
700
+ # At this point you have 4D [x,y,z,t] [_,□′] or [_,ρ·□″] data
701
+ # ===============================================================================
702
+
703
+ def __ccor_kernel_t(xi,zi,yii,do_density_weighting):
704
+ if do_density_weighting:
705
+ uR = rho[xi,yii,zi,:] * data_R[xi,yii,zi,:]
706
+ else:
707
+ uR = data_R[xi,yii,zi,:]
708
+ uL = data_L[xi,0,zi,:]
709
+ return xi,zi,ccor(uL,uR)
710
+
711
+ def __coherence_kernel_t(xi,zi,yii,do_density_weighting):
712
+
713
+ ## 1D [t] □′ or ρ·□″ vectors
714
+ if do_density_weighting:
715
+ uR = rho[xi,yii,zi,:] * data_R[xi,yii,zi,:]
716
+ else:
717
+ uR = data_R[xi,yii,zi,:]
718
+ uL = data_L[xi,0,zi,:]
719
+
720
+ _,Pxx = csd(
721
+ uL,uL,
722
+ fs=fs,
723
+ nperseg=nperseg,
724
+ noverlap=noverlap,
725
+ window='hann',
726
+ detrend=False,
727
+ scaling='density',
728
+ return_onesided=True,
729
+ )
730
+ _,Pyy = csd(
731
+ uR,uR,
732
+ fs=fs,
733
+ nperseg=nperseg,
734
+ noverlap=noverlap,
735
+ window='hann',
736
+ detrend=False,
737
+ scaling='density',
738
+ return_onesided=True,
739
+ )
740
+ _,Pxy = csd(
741
+ uL,uR,
742
+ fs=fs,
743
+ nperseg=nperseg,
744
+ noverlap=noverlap,
745
+ window='hann',
746
+ detrend=False,
747
+ scaling='density',
748
+ return_onesided=True,
749
+ )
750
+
751
+ eps = np.finfo(float).eps
752
+ Pxx = np.real(Pxx) ## imaginary part of the auto-spectral density is zero anyway
753
+ Pyy = np.real(Pyy)
754
+ Pxx = np.maximum(Pxx, eps)
755
+ Pyy = np.maximum(Pyy, eps)
756
+ Coh = (np.abs(Pxy)**2) / (Pxx * Pyy)
757
+ return xi,zi,Pxy,Coh
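+ ## The quantity returned above is the magnitude-squared coherence,
+ ## γ²(f) = |Pxy(f)|² / (Pxx(f)·Pyy(f)), bounded 0 <= γ² <= 1 for the
+ ## Welch-averaged estimates used here; the eps floor only guards the division.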
758
+
759
+ def __coherence_kernel_z(xi,ti,yii,do_density_weighting):
760
+
761
+ ## 1D [z] □′ or ρ·□″ vectors
762
+ if do_density_weighting:
763
+ uR = rho[xi,yii,:,ti] * data_R[xi,yii,:,ti]
764
+ else:
765
+ uR = data_R[xi,yii,:,ti]
766
+ uL = data_L[xi,0,:,ti]
767
+
768
+ N = uL.shape[0]
769
+
770
+ _,Pxx = csd(
771
+ uL,uL,
772
+ fs=1/dz0, ## spatial sampling rate in [z]; keeps the Pz scaling consistent with the kz grid above
773
+ nperseg=N,
774
+ noverlap=0,
775
+ window='boxcar',
776
+ detrend=False,
777
+ scaling='density',
778
+ return_onesided=True,
779
+ )
780
+ _,Pyy = csd(
781
+ uR,uR,
782
+ fs=1/dz0, ## spatial sampling rate in [z]
783
+ nperseg=N,
784
+ noverlap=0,
785
+ window='boxcar',
786
+ detrend=False,
787
+ scaling='density',
788
+ return_onesided=True,
789
+ )
790
+ _,Pxy = csd(
791
+ uL,uR,
792
+ fs=1/dz0, ## spatial sampling rate in [z]
793
+ nperseg=N,
794
+ noverlap=0,
795
+ window='boxcar',
796
+ detrend=False,
797
+ scaling='density',
798
+ return_onesided=True,
799
+ )
800
+
801
+ eps = np.finfo(float).eps
802
+ Pxx = np.real(Pxx) ## imaginary part of the auto-spectral density is zero anyway
803
+ Pyy = np.real(Pyy)
804
+ Pxx = np.maximum(Pxx, eps)
805
+ Pyy = np.maximum(Pyy, eps)
806
+ Coh = (np.abs(Pxy)**2) / (Pxx * Pyy)
807
+ return xi,ti,Pxy,Coh
808
+
809
+ self.comm.Barrier()
810
+ t_start = timeit.default_timer()
811
+
812
+ ## [y] loop inner (indices within chunk)
813
+ for yi in range(cy1,cy2):
814
+
815
+ yii = yi - cy1 ## chunk local
816
+ yiii = yi - ry1 ## rank local
817
+
818
+ # ===========================================================================
819
+ # Cross-Correlation [t] : loop over [x,z]
820
+ # ===========================================================================
821
+
822
+ ## Cross-correlation buffer for [y] loop inner
823
+ R_xz = np.zeros((nx,nz,n_lags_t), dtype=np.float64) ## [x,z] range for ccor(t)
824
+
825
+ ## Concurrent/threaded execution for ccor(t)
826
+ tasks = [(xi,zi,yii,do_density_weighting) for xi in range(nx) for zi in range(nz)]
827
+ with ThreadPoolExecutor(max_workers=n_threads) as executor:
828
+ results = executor.map(lambda task: __ccor_kernel_t(*task,), tasks)
829
+ for xi,zi,result in results:
830
+ R_xz[xi,zi,:] = result
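+ ## Note: executor.map() yields results in task order, but correctness does not
+ ## rely on that -- each kernel returns its own (xi,zi) indices for placement.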
831
+
832
+ ## avg in [x,z] & write in rank context
833
+ Rt[scalar][yiii,:] = np.mean(R_xz, axis=(0,1), dtype=np.float64)
834
+
835
+ # ===========================================================================
836
+ # Coherence [t] : loop over [x,z]
837
+ # ===========================================================================
838
+
839
+ ## Coherence buffer for [y] loop inner
840
+ Coh_xz = np.zeros((nx,nz,nf), dtype=np.complex128) ## [x,z] range
841
+ P_xz = np.zeros((nx,nz,nf), dtype=np.complex128) ## [x,z] range
842
+
843
+ ## Concurrent/threaded execution for Pxy & Coherence
844
+ tasks = [(xi,zi,yii,do_density_weighting) for xi in range(nx) for zi in range(nz)]
845
+ with ThreadPoolExecutor(max_workers=n_threads) as executor:
846
+ results = executor.map(lambda task: __coherence_kernel_t(*task,), tasks)
847
+ for xi,zi,P,Coh in results:
848
+ Coh_xz[xi,zi,:] = Coh[fp]
849
+ P_xz[xi,zi,:] = P[fp]
850
+
851
+ # if np.isnan(Coh_xz).any():
852
+ # print('NaNs in Coh_xz')
853
+ # self.comm.Abort(1)
854
+ # if np.isnan(P_xz).any():
855
+ # print('NaNs in P_xz')
856
+ # self.comm.Abort(1)
857
+
858
+ Coh_t[scalar][yiii,:] = np.mean(Coh_xz , axis=(0,1), dtype=np.complex128)
859
+ Pt[scalar][yiii,:] = np.mean(P_xz , axis=(0,1), dtype=np.complex128)
860
+
861
+ # ===========================================================================
862
+ # Coherence [z] : loop over [x,t]
863
+ # ===========================================================================
864
+
865
+ ## Coherence buffer for [y] loop inner
866
+ Coh_xt = np.zeros((nx,nt,nkz), dtype=np.complex128) ## [x,t] range
867
+ P_xt = np.zeros((nx,nt,nkz), dtype=np.complex128) ## [x,t] range
868
+
869
+ ## Concurrent/threaded execution for Pxy & Coherence
870
+ tasks = [(xi,ti,yii,do_density_weighting) for xi in range(nx) for ti in range(nt)]
871
+ with ThreadPoolExecutor(max_workers=n_threads) as executor:
872
+ results = executor.map(lambda task: __coherence_kernel_z(*task,), tasks)
873
+ for xi,ti,P,Coh in results:
874
+ Coh_xt[xi,ti,:] = Coh[kzp]
875
+ P_xt[xi,ti,:] = P[kzp]
876
+
877
+ # if np.isnan(Coh_xt).any():
878
+ # print('NaNs in Coh_xt')
879
+ # self.comm.Abort(1)
880
+ # if np.isnan(P_xt).any():
881
+ # print('NaNs in P_xt')
882
+ # self.comm.Abort(1)
883
+
884
+ Coh_z[scalar][yiii,:] = np.mean(Coh_xt , axis=(0,1), dtype=np.complex128)
885
+ Pz[scalar][yiii,:] = np.mean(P_xt , axis=(0,1), dtype=np.complex128)
886
+
887
+ # ===========================================================================
888
+
889
+ self.comm.Barrier()
890
+ t_delta = timeit.default_timer() - t_start
891
+ if verbose: tqdm.write(even_print(msg, format_time_string(t_delta), s=True))
892
+ if verbose: progress_bar.update() ## (scalar, [y] chunk) progress
893
+
894
+ #break ## DEBUG ([y] loop)
895
+ #break ## DEBUG (scalar loop)
896
+
897
+ if verbose: progress_bar.close()
898
+ self.comm.Barrier()
899
+ if verbose: print(72*'-')
900
+
901
+ # ==============================================================
902
+ # Write HDF5 (.h5) file
903
+ # ==============================================================
904
+
905
+ ## Open on rank 0 and write attributes, dimensions, etc.
906
+ if (self.rank==0):
907
+ with h5py.File(fn_h5_out, 'w') as hfw:
908
+
909
+ ## Write floats,ints as top-level attributes
910
+ for key,val in data.items():
911
+ if isinstance(data[key], (int,np.int32,np.int64)):
912
+ hfw.attrs[key] = val
913
+ elif isinstance(data[key], (float,np.float32,np.float64)):
914
+ hfw.attrs[key] = val
915
+ elif isinstance(data[key], np.ndarray):
916
+ pass
917
+ else:
918
+ print(f'key {key} is type {str(type(data[key]))}')
919
+ self.comm.Abort(1)
920
+
921
+ ## Write dim arrays
922
+ hfw.create_dataset( 'dims/x' , data=x ) ## [m]
923
+ hfw.create_dataset( 'dims/y' , data=y ) ## [m]
924
+ hfw.create_dataset( 'dims/z' , data=z ) ## [m]
925
+ hfw.create_dataset( 'dims/t' , data=t ) ## [s]
926
+ hfw.create_dataset( 'dims/freq' , data=freq ) ## [1/s] | [Hz]
927
+ hfw.create_dataset( 'dims/kz' , data=kz ) ## [1/m]
928
+ hfw.create_dataset( 'dims/lags_t' , data=lags_t ) ## [s]
929
+
930
+ ## Initialize datasets
931
+ for scalar in scalars:
932
+ hfw.create_dataset( f'covariance/{scalar}' , shape=(ny,) , dtype=np.float64 , chunks=None , data=np.full((ny,),0.,np.float64) )
933
+ hfw.create_dataset( f'Rt/{scalar}' , shape=(ny,n_lags_t) , dtype=np.float64 , chunks=(1,n_lags_t) , data=np.full((ny,n_lags_t),0.,np.float64) )
934
+ hfw.create_dataset( f'Pt/{scalar}' , shape=(ny,nf) , dtype=np.complex128 , chunks=(1,nf) , data=np.full((ny,nf),0.,np.complex128) )
935
+ hfw.create_dataset( f'Pz/{scalar}' , shape=(ny,nkz) , dtype=np.complex128 , chunks=(1,nkz) , data=np.full((ny,nkz),0.,np.complex128) )
936
+ hfw.create_dataset( f'Coh_t/{scalar}' , shape=(ny,nf) , dtype=np.complex128 , chunks=(1,nf) , data=np.full((ny,nf),0.,np.complex128) )
937
+ hfw.create_dataset( f'Coh_z/{scalar}' , shape=(ny,nkz) , dtype=np.complex128 , chunks=(1,nkz) , data=np.full((ny,nkz),0.,np.complex128) )
938
+
939
+ ## Initialize datasets 1D [y] mean
940
+ for scalar in avg_Re.dtype.names:
941
+ hfw.create_dataset( f'avg/Re/{scalar}', shape=(ny,), dtype=np.float64, chunks=None, data=np.full((ny,),0.,np.float64) )
942
+ for scalar in avg_Fv.dtype.names:
943
+ hfw.create_dataset( f'avg/Fv/{scalar}', shape=(ny,), dtype=np.float64, chunks=None, data=np.full((ny,),0.,np.float64) )
944
+
945
+ self.comm.Barrier()
946
+
947
+ with h5py.File(fn_h5_out, 'a', driver='mpio', comm=self.comm) as hfw:
948
+
949
+ ## Collectively write covariance,Rt,P,Coh
950
+ for scalar in scalars:
951
+ dset = hfw[f'covariance/{scalar}']
952
+ with dset.collective:
953
+ dset[ry1:ry2] = covariance[scalar][:]
954
+ dset = hfw[f'Rt/{scalar}']
955
+ with dset.collective:
956
+ dset[ry1:ry2,:] = Rt[scalar][:,:]
957
+ dset = hfw[f'Pt/{scalar}']
958
+ with dset.collective:
959
+ dset[ry1:ry2,:] = Pt[scalar][:,:]
960
+ dset = hfw[f'Pz/{scalar}']
961
+ with dset.collective:
962
+ dset[ry1:ry2,:] = Pz[scalar][:,:]
963
+ dset = hfw[f'Coh_t/{scalar}']
964
+ with dset.collective:
965
+ dset[ry1:ry2,:] = Coh_t[scalar][:,:]
966
+ dset = hfw[f'Coh_z/{scalar}']
967
+ with dset.collective:
968
+ dset[ry1:ry2,:] = Coh_z[scalar][:,:]
969
+
970
+ ## Collectively write 1D [y] avgs (Reynolds,Favre)
971
+ for scalar in avg_Re.dtype.names:
972
+ dset = hfw[f'avg/Re/{scalar}']
973
+ with dset.collective:
974
+ dset[ry1:ry2] = avg_Re[scalar][:]
975
+ for scalar in avg_Fv.dtype.names:
976
+ dset = hfw[f'avg/Fv/{scalar}']
977
+ with dset.collective:
978
+ dset[ry1:ry2] = avg_Fv[scalar][:]
979
+
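+ ## Post-processing sketch (serial read-back; dataset paths as created above):
+ # with h5py.File(fn_h5_out,'r') as hf:
+ # freq = hf['dims/freq'][()]
+ # coh = hf['Coh_t/utauI_uI'][()] ## (ny,nf) complex-typed, real-valued content
+ # gamma2 = np.real(coh)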
980
+ ## Report file contents
981
+ self.comm.Barrier()
982
+ if (self.rank==0):
983
+ even_print( os.path.basename(fn_h5_out) , f'{(os.path.getsize(fn_h5_out)/1024**2):0.1f} [MB]' )
984
+ print(72*'-')
985
+ with h5py.File(fn_h5_out,'r') as hfr:
986
+ h5_print_contents(hfr)
987
+ self.comm.Barrier()
988
+
989
+ if verbose: print(72*'-')
990
+ if verbose: print('total time : rgd.calc_wall_coh_xpln() : %s'%format_time_string((timeit.default_timer() - t_start_func)))
991
+ if verbose: print(72*'-')
992
+ return