turbx 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- turbx/__init__.py +52 -0
- turbx/bl.py +620 -0
- turbx/blasius.py +64 -0
- turbx/cli.py +19 -0
- turbx/composite_profile.py +243 -0
- turbx/confidence_interval.py +64 -0
- turbx/eas3.py +420 -0
- turbx/eas4.py +567 -0
- turbx/fig_ax_constructor.py +52 -0
- turbx/freestream_parameters.py +268 -0
- turbx/gradient.py +391 -0
- turbx/grid_metric.py +272 -0
- turbx/h5.py +236 -0
- turbx/mvp.py +385 -0
- turbx/rgd.py +2693 -0
- turbx/rgd_mean.py +523 -0
- turbx/rgd_testing.py +354 -0
- turbx/rgd_xpln_ccor.py +701 -0
- turbx/rgd_xpln_coh.py +992 -0
- turbx/rgd_xpln_mean_dim.py +336 -0
- turbx/rgd_xpln_spectrum.py +940 -0
- turbx/rgd_xpln_stats.py +738 -0
- turbx/rgd_xpln_turb_budget.py +1193 -0
- turbx/set_mpl_env.py +85 -0
- turbx/signal.py +277 -0
- turbx/spd.py +1206 -0
- turbx/spd_wall_ccor.py +629 -0
- turbx/spd_wall_ci.py +406 -0
- turbx/spd_wall_import.py +676 -0
- turbx/spd_wall_spectrum.py +638 -0
- turbx/spd_wall_stats.py +618 -0
- turbx/utils.py +84 -0
- turbx/ztmd.py +2224 -0
- turbx/ztmd_analysis.py +2337 -0
- turbx/ztmd_loader.py +56 -0
- turbx-1.0.2.dist-info/LICENSE +21 -0
- turbx-1.0.2.dist-info/METADATA +120 -0
- turbx-1.0.2.dist-info/RECORD +41 -0
- turbx-1.0.2.dist-info/WHEEL +5 -0
- turbx-1.0.2.dist-info/entry_points.txt +2 -0
- turbx-1.0.2.dist-info/top_level.txt +1 -0
turbx/spd.py
ADDED
|
@@ -0,0 +1,1206 @@
|
|
|
1
|
+
import io
|
|
2
|
+
import os
|
|
3
|
+
import re
|
|
4
|
+
import shutil
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
import textwrap
|
|
8
|
+
import time
|
|
9
|
+
import timeit
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
import h5py
|
|
13
|
+
import numpy as np
|
|
14
|
+
from mpi4py import MPI
|
|
15
|
+
from tqdm import tqdm
|
|
16
|
+
|
|
17
|
+
from .h5 import h5_chunk_sizer
|
|
18
|
+
from .spd_wall_ccor import _calc_ccor_wall
|
|
19
|
+
from .spd_wall_ci import _calc_mean_uncertainty_BMBC
|
|
20
|
+
from .spd_wall_import import _import_eas4_wall, _init_from_eas4_wall
|
|
21
|
+
from .spd_wall_spectrum import _calc_turb_cospectrum_wall
|
|
22
|
+
from .spd_wall_stats import _calc_statistics_wall
|
|
23
|
+
from .utils import even_print, format_time_string
|
|
24
|
+
|
|
25
|
+
# ======================================================================
|
|
26
|
+
|
|
27
|
+
class spd(h5py.File):
|
|
28
|
+
'''
|
|
29
|
+
Surface Polydata (SPD)
|
|
30
|
+
'''
|
|
31
|
+
|
|
32
|
+
def __init__(self, *args, **kwargs):
    '''
    Open (or create) an SPD file.

    args    : (fname, open_mode) -- forwarded to h5py.File.__init__()
    kwargs  : h5py.File kwargs, plus SPD-specific kwargs which are pop()'d
              before being forwarded:
                stripe_count   (int)  : Lustre stripe count  (default 16)
                stripe_size_mb (int)  : Lustre stripe size in MB (default 2)
                perms          (str)  : 3-digit octal permission string (default '640')
                no_indep_rw    (bool) : force collective MPI-IO (default False)
                verbose        (bool) : verbosity for get_header() (default False)
                force          (bool) : allow 'w' to overwrite an existing file (default False)

    MPI mode is inferred from driver='mpio'; in that case kwargs['comm'] must
    be the communicator. On open_mode 'w', rank 0 (re)creates the file on the
    filesystem (delete/touch/chmod, and Lustre striping via 'lfs migrate' if
    available) before the collective h5py open.
    '''

    self.fname, self.open_mode = args

    self.fname_path = os.path.dirname(self.fname)
    self.fname_base = os.path.basename(self.fname)
    self.fname_root, self.fname_ext = os.path.splitext(self.fname_base)

    ## default to libver='latest' if none provided
    if ('libver' not in kwargs):
        kwargs['libver'] = 'latest'

    ## catch possible user error --> could prevent accidental EAS overwrites
    if (self.fname_ext=='.eas'):
        raise ValueError('EAS4 files should not be opened with turbx.spd()')

    ## determine if using mpi
    if ('driver' in kwargs) and (kwargs['driver']=='mpio'):
        self.usingmpi = True
    else:
        self.usingmpi = False

    ## determine communicator & rank info
    if self.usingmpi:
        self.comm    = kwargs['comm']
        self.n_ranks = self.comm.Get_size()
        self.rank    = self.comm.Get_rank()
    else:
        self.comm    = None
        self.n_ranks = 1
        self.rank    = 0

    ## spd() unique kwargs (not h5py.File kwargs) --> pop() rather than get()
    stripe_count   = kwargs.pop('stripe_count'   , 16    )
    stripe_size_mb = kwargs.pop('stripe_size_mb' , 2     )
    perms          = kwargs.pop('perms'          , '640' )
    no_indep_rw    = kwargs.pop('no_indep_rw'    , False )

    if not isinstance(stripe_count, int):
        raise ValueError
    if not isinstance(stripe_size_mb, int):
        raise ValueError
    if not isinstance(perms, str) or len(perms)!=3 or not re.fullmatch(r'\d{3}',perms):
        raise ValueError("perms must be 3-digit string like '660'")

    ## if not using MPI, remove 'driver' and 'comm' from kwargs
    ## (they would otherwise be forwarded to h5py.File and fail)
    if ( not self.usingmpi ) and ('driver' in kwargs):
        kwargs.pop('driver')
    if ( not self.usingmpi ) and ('comm' in kwargs):
        kwargs.pop('comm')

    ## | mpiexec --mca io romio321 -n $NP python3 ...
    ## | mpiexec --mca io ompio -n $NP python3 ...
    ## | ompi_info --> print ompi settings (grep 'MCA io' for I/O opts)
    ## | export ROMIO_FSTYPE_FORCE="lustre:" --> force Lustre driver over UFS when using romio --> causes crash
    ## | export ROMIO_FSTYPE_FORCE="ufs:"
    ## | export ROMIO_PRINT_HINTS=1 --> show available hints
    ##
    ## https://doku.lrz.de/best-practices-hints-and-optimizations-for-io-10747318.html
    ##
    ## OMPIO
    ## export OMPI_MCA_sharedfp=^lockedfile,individual
    ## mpiexec --mca io ompio -n $NP python3 script.py

    ## set MPI hints, passed through 'mpi_info' dict
    ## if caller supplied 'info', respect it; otherwise build a default hint set
    if self.usingmpi:
        if ('info' in kwargs):
            self.mpi_info = kwargs['info']
        else:
            mpi_info = MPI.Info.Create()

            ## ROMIO only ... ignored if OMPIO is used
            mpi_info.Set('romio_ds_write' , 'disable' )
            mpi_info.Set('romio_ds_read'  , 'disable' )
            #mpi_info.Set('romio_cb_read'  , 'automatic' )
            #mpi_info.Set('romio_cb_write' , 'automatic' )
            mpi_info.Set('romio_cb_read'  , 'enable'  )
            mpi_info.Set('romio_cb_write' , 'enable'  )

            ## ROMIO -- collective buffer size
            mpi_info.Set('cb_buffer_size' , str(int(round(1*1024**3))) ) ## 1 [GB]

            ## ROMIO -- force collective I/O
            if no_indep_rw:
                mpi_info.Set('romio_no_indep_rw' , 'true' )

            ## cb_nodes: number of aggregator processes
            #mpi_info.Set('cb_nodes' , str(min(16,self.n_ranks//2)) )
            mpi_info.Set('cb_nodes' , str(min(16,self.n_ranks)) )

            kwargs['info'] = mpi_info
            self.mpi_info = mpi_info

    ## rdcc_w0 : preemption policy (weight) for HDF5's raw data chunk cache
    ## - influences how HDF5 evicts chunks from the per-process chunk cache
    ## - 1.0 favors retaining fully-read chunks (good for read-heavy access)
    ## - 0.0 favors recently-used chunks (better for partial writes)
    if ('rdcc_w0' not in kwargs):
        kwargs['rdcc_w0'] = 0.75

    ## rdcc_nbytes : maximum total size of the HDF5 raw chunk cache per dataset per process
    if ('rdcc_nbytes' not in kwargs):
        kwargs['rdcc_nbytes'] = int(1*1024**3) ## 1 [GB]

    ## rdcc_nslots : number of hash table slots in the raw data chunk cache
    ## - should be ~= ( rdcc_nbytes / chunk size )
    if ('rdcc_nslots' not in kwargs):
        #kwargs['rdcc_nslots'] = 16381 ## prime
        kwargs['rdcc_nslots'] = kwargs['rdcc_nbytes'] // (2*1024**2) ## assume 2 [MB] chunks
        #kwargs['rdcc_nslots'] = kwargs['rdcc_nbytes'] // (128*1024**2) ## assume 128 [MB] chunks

    ## spd() unique kwargs (not h5py.File kwargs) --> pop() rather than get()
    verbose = kwargs.pop( 'verbose' , False )
    force   = kwargs.pop( 'force'   , False )

    if not isinstance(verbose, bool):
        raise ValueError
    if not isinstance(force, bool):
        raise ValueError

    # === initialize file on FS

    ## if file open mode is 'w', the file exists, and force is False
    ## --> raise error (rank 0 prints help text; all ranks sync on the
    ##     barrier before raising so no rank races ahead)
    if (self.open_mode == 'w') and (force is False) and os.path.isfile(self.fname):
        if (self.rank==0):
            print('\n'+72*'-')
            print(self.fname+' already exists! opening with \'w\' would overwrite.\n')
            openModeInfoStr = '''
                              r       --> Read only, file must exist
                              r+      --> Read/write, file must exist
                              w       --> Create file, truncate if exists
                              w- or x --> Create file, fail if exists
                              a       --> Read/write if exists, create otherwise

                              or use force=True arg:

                              >>> with spd(<<fname>>,'w',force=True) as f:
                              >>>     ...
                              '''
            print(textwrap.indent(textwrap.dedent(openModeInfoStr), 2*' ').strip('\n'))
            print(72*'-'+'\n')
            sys.stdout.flush()

        if (self.comm is not None):
            self.comm.Barrier()
        raise FileExistsError()

    ## if file open mode is 'w'
    ## --> <delete>, touch, chmod, stripe
    ## only rank 0 touches the filesystem; other ranks wait at the barrier
    if (self.open_mode == 'w'):
        if (self.rank==0):
            if os.path.isfile(self.fname): ## if the file exists, delete it
                os.remove(self.fname)
                time.sleep(0.5)
            Path(self.fname).touch() ## touch a new file
            os.chmod(self.fname, int(perms, base=8)) ## change permissions
            if shutil.which('lfs') is not None: ## set stripe if on Lustre
                cmd_str_lfs_migrate = f'lfs migrate --stripe-count {stripe_count:d} --stripe-size {stripe_size_mb:d}M {self.fname} > /dev/null 2>&1'
                return_code = subprocess.call(cmd_str_lfs_migrate, shell=True)
                if (return_code != 0):
                    raise ValueError('lfs migrate failed')
            time.sleep(1)

        if (self.comm is not None):
            self.comm.Barrier()

    self.mod_avail_tqdm = ('tqdm' in sys.modules)

    ## call actual h5py.File.__init__()
    super(spd, self).__init__(*args, **kwargs)
    self.get_header(verbose=verbose)
|
|
204
|
+
|
|
205
|
+
def get_header(self,**kwargs):
    '''
    initialize header attributes of SPD class instance

    Reads characteristic freestream values either from the legacy
    'header/udef_*' datasets or from file attributes, computes derived
    quantities, and loads grid/time metadata and scalar dataset info.
    Only rank 0 prints (verbose is forced False on other ranks).
    '''

    verbose = kwargs.get('verbose',True)

    if (self.rank!=0):
        verbose=False

    # === udef (header vector dset based) --> the 'old' way but still present in RGD,CGD

    if ('header' in self):

        udef_real = np.copy(self['header/udef_real'][:])
        udef_char = np.copy(self['header/udef_char'][:]) ## the unpacked numpy array of |S128 encoded fixed-length character objects
        udef_char = [s.decode('utf-8') for s in udef_char] ## convert it to a python list of utf-8 strings
        self.udef = dict(zip(udef_char, udef_real)) ## make dict where keys are udef_char and values are udef_real

        # === characteristic values

        self.Ma          = self.udef['Ma']
        self.Re          = self.udef['Re']
        self.Pr          = self.udef['Pr']
        self.kappa       = self.udef['kappa']
        self.R           = self.udef['R']
        self.p_inf       = self.udef['p_inf']
        self.T_inf       = self.udef['T_inf']
        self.mu_Suth_ref = self.udef['mu_Suth_ref']
        self.T_Suth_ref  = self.udef['T_Suth_ref']
        self.S_Suth      = self.udef['S_Suth']

        self.C_Suth = self.mu_Suth_ref/(self.T_Suth_ref**(3/2))*(self.T_Suth_ref + self.S_Suth) ## [kg/(m·s·√K)]
        self.udef['C_Suth'] = self.C_Suth

        self._print_characteristic_values(verbose)
        self._calc_derived_freestream(verbose)

    else:
        #print("dset 'header' not in SPD")
        pass

    # === udef (attr based)

    header_attr_str_list = ['Ma','Re','Pr','kappa','R','p_inf','T_inf','S_Suth','mu_Suth_ref','T_Suth_ref'] ## ,'C_Suth'
    header_attr_based = all( attr_str in self.attrs.keys() for attr_str in header_attr_str_list )

    if header_attr_based:

        ## set all attributes
        for attr_str in header_attr_str_list:
            setattr( self, attr_str, self.attrs[attr_str] )

        self.C_Suth = self.mu_Suth_ref/(self.T_Suth_ref**(3/2))*(self.T_Suth_ref + self.S_Suth) ## [kg/(m·s·√K)]

        self._print_characteristic_values(verbose)
        self._calc_derived_freestream(verbose)
        if verbose: print(72*'-')

    # === grid/coordinate datasets (disabled: could potentially be big)

    if 0: ## could potentially be big
        if ('dims/xyz' in self):
            self.xyz = np.copy( self['dims/xyz'][()] )
        if ('dims/stang' in self):
            self.stang = np.copy( self['dims/stang'][()] )
        if ('dims/snorm' in self):
            self.snorm = np.copy( self['dims/snorm'][()] )
        if ('dims/crv_R' in self):
            self.crv_R = np.copy( self['dims/crv_R'][()] )

    ## grid size attributes (if present in file)
    if ('n_quads' in self.attrs.keys()):
        self.n_quads = int( self.attrs['n_quads'] )
    if ('n_pts' in self.attrs.keys()):
        self.n_pts = int( self.attrs['n_pts'] )
    if ('ni' in self.attrs.keys()):
        self.ni = int( self.attrs['ni'] )
    if ('nj' in self.attrs.keys()):
        self.nj = int( self.attrs['nj'] )

    if ('nt' in self.attrs.keys()):
        self.nt = int( self.attrs['nt'] )

    ## time vector: cross-check against 'nt' attr, or set nt from it
    if ('dims/t' in self):
        self.t = np.copy( self['dims/t'][()] )
    if hasattr(self,'t'):
        if (self.t.ndim!=1):
            raise ValueError('self.t.ndim!=1')
        nt = self.t.shape[0]

        if hasattr(self,'nt'):
            if not isinstance(self.nt, (int,np.int32,np.int64)):
                raise TypeError('self.nt is not type int')
            if (self.nt != nt):
                raise ValueError('self.nt != nt')
        else:
            #self.attrs['nt'] = nt
            self.nt = nt

    ## check n_quads / n_pts is consistent with xyz
    ## if xyz exists and attrs n_quads/n_pts do not exist, set them
    if hasattr(self,'xyz'):
        if (self.xyz.ndim!=3):
            raise ValueError('self.xyz.ndim!=3')
        ni,nj,three = self.xyz.shape

        if hasattr(self,'ni'):
            if not isinstance(self.ni, (int,np.int32,np.int64)):
                raise TypeError('self.ni is not type int')
            if (self.ni != ni):
                raise ValueError('self.ni != ni')
        else:
            #self.attrs['ni'] = ni
            self.ni = ni

        if hasattr(self,'nj'):
            if not isinstance(self.nj, (int,np.int32,np.int64)):
                raise TypeError('self.nj is not type int')
            if (self.nj != nj):
                raise ValueError('self.nj != nj')
        else:
            #self.attrs['nj'] = nj
            self.nj = nj

        if hasattr(self,'n_quads'):
            if not isinstance(self.n_quads, (int,np.int32,np.int64)):
                raise TypeError('self.n_quads is not type int')
            if (self.n_quads != (ni-1)*(nj-1)):
                raise ValueError('self.n_quads != (ni-1)*(nj-1)')
        else:
            #self.attrs['n_quads'] = (ni-1)*(nj-1)
            self.n_quads = (ni-1)*(nj-1)

        if hasattr(self,'n_pts'):
            if not isinstance(self.n_pts, (int,np.int32,np.int64)):
                raise TypeError('self.n_pts is not type int')
            if (self.n_pts != ni*nj):
                raise ValueError('self.n_pts != ni*nj')
        else:
            #self.attrs['n_pts'] = ni*nj
            self.n_pts = ni*nj

    if any([hasattr(self,'ni'), hasattr(self,'nj'), hasattr(self,'n_quads'), hasattr(self,'n_pts') ]):
        if verbose and hasattr(self,'nt'):      even_print('nt',      f'{self.nt:d}')
        if verbose and hasattr(self,'ni'):      even_print('ni',      f'{self.ni:d}')
        if verbose and hasattr(self,'nj'):      even_print('nj',      f'{self.nj:d}')
        if verbose and hasattr(self,'n_quads'): even_print('n_quads', f'{self.n_quads:d}')
        if verbose and hasattr(self,'n_pts'):   even_print('n_pts',   f'{self.n_pts:d}')
        #if verbose: print(72*'-')

    # === ts group names & scalars

    if ('data' in self):
        #self.scalars = list(self['data'].keys())
        self.scalars = [ k for k,v in self['data'].items() if isinstance(v,h5py.Dataset) ]
        self.n_scalars = len(self.scalars)
        self.scalars_dtypes = [ self[f'data/{scalar}'].dtype for scalar in self.scalars ]
    else:
        self.scalars = []
        self.n_scalars = 0
        self.scalars_dtypes = []
    self.scalars_dtypes_dict = dict(zip(self.scalars, self.scalars_dtypes)) ## dict {<<scalar>>: <<dtype>>}

    return

def _print_characteristic_values(self, verbose):
    '''
    print characteristic (freestream) values read from the file header
    -- shared by the dataset-based and attribute-based header paths
    '''
    if verbose: even_print('Ma'          , '%0.2f [-]'           % self.Ma          )
    if verbose: even_print('Re'          , '%0.1f [-]'           % self.Re          )
    if verbose: even_print('Pr'          , '%0.3f [-]'           % self.Pr          )
    if verbose: even_print('T_inf'       , '%0.3f [K]'           % self.T_inf       )
    if verbose: even_print('p_inf'       , '%0.1f [Pa]'          % self.p_inf       )
    if verbose: even_print('kappa'       , '%0.3f [-]'           % self.kappa       )
    if verbose: even_print('R'           , '%0.3f [J/(kg·K)]'    % self.R           )
    if verbose: even_print('mu_Suth_ref' , '%0.6E [kg/(m·s)]'    % self.mu_Suth_ref )
    if verbose: even_print('T_Suth_ref'  , '%0.2f [K]'           % self.T_Suth_ref  )
    if verbose: even_print('S_Suth'      , '%0.2f [K]'           % self.S_Suth      )
    if verbose: even_print('C_Suth'      , '%0.5e [kg/(m·s·√K)]' % self.C_Suth      )
    return

def _calc_derived_freestream(self, verbose):
    '''
    compute derived freestream quantities from the characteristic values
    (Ma, Re, Pr, kappa, R, p_inf, T_inf, Sutherland constants), print them
    if verbose, and populate self.udef_deriv
    -- shared by the dataset-based and attribute-based header paths
    '''

    ## mu_inf_1 = 14.58e-7*self.T_inf**1.5/(self.T_inf+110.4)
    ## mu_inf_2 = self.mu_Suth_ref*(self.T_inf/self.T_Suth_ref)**(3/2) * ((self.T_Suth_ref+self.S_Suth)/(self.T_inf+self.S_Suth))
    ## mu_inf_3 = self.C_Suth*self.T_inf**(3/2)/(self.T_inf+self.S_Suth)
    ## if not np.isclose(mu_inf_1, mu_inf_2, rtol=1e-14):
    ##     raise AssertionError('inconsistency in Sutherland calc --> check')
    ## if not np.isclose(mu_inf_2, mu_inf_3, rtol=1e-14):
    ##     raise AssertionError('inconsistency in Sutherland calc --> check')
    ## mu_inf = self.mu_inf = mu_inf_2

    self.mu_inf    = self.mu_Suth_ref*(self.T_inf/self.T_Suth_ref)**(3/2) * ((self.T_Suth_ref+self.S_Suth)/(self.T_inf+self.S_Suth))
    self.rho_inf   = self.p_inf/(self.R*self.T_inf)
    self.nu_inf    = self.mu_inf/self.rho_inf
    self.a_inf     = np.sqrt(self.kappa*self.R*self.T_inf)
    self.U_inf     = self.Ma*self.a_inf
    self.cp        = self.R*self.kappa/(self.kappa-1.)
    self.cv        = self.cp/self.kappa
    self.recov_fac = self.Pr**(1/3)
    self.Taw       = self.T_inf + self.recov_fac*self.U_inf**2/(2*self.cp)
    self.lchar     = self.Re*self.nu_inf/self.U_inf

    self.tchar = self.lchar / self.U_inf
    self.uchar = self.U_inf

    if verbose: print(72*'-')
    if verbose: even_print('rho_inf'         , '%0.3f [kg/m³]'    % self.rho_inf   )
    if verbose: even_print('mu_inf'          , '%0.6E [kg/(m·s)]' % self.mu_inf    )
    if verbose: even_print('nu_inf'          , '%0.6E [m²/s]'     % self.nu_inf    )
    if verbose: even_print('a_inf'           , '%0.6f [m/s]'      % self.a_inf     )
    if verbose: even_print('U_inf'           , '%0.6f [m/s]'      % self.U_inf     )
    if verbose: even_print('cp'              , '%0.3f [J/(kg·K)]' % self.cp        )
    if verbose: even_print('cv'              , '%0.3f [J/(kg·K)]' % self.cv        )
    if verbose: even_print('recovery factor' , '%0.6f [-]'        % self.recov_fac )
    if verbose: even_print('Taw'             , '%0.3f [K]'        % self.Taw       )
    if verbose: even_print('lchar'           , '%0.6E [m]'        % self.lchar     )
    if verbose: even_print('tchar'           , '%0.6E [s]'        % self.tchar     )

    ## write the 'derived' udef variables to a dict attribute of the SPD instance
    self.udef_deriv = { 'rho_inf':self.rho_inf,
                        'mu_inf':self.mu_inf,
                        'nu_inf':self.nu_inf,
                        'a_inf':self.a_inf,
                        'U_inf':self.U_inf,
                        'cp':self.cp,
                        'cv':self.cv,
                        'recov_fac':self.recov_fac,
                        'Taw':self.Taw,
                        'lchar':self.lchar,
                        }
    return
|
|
520
|
+
|
|
521
|
+
def gen_unstruct_xyz(self,**kwargs):
    '''
    convert structured grid coords (dims/xyz, shape (ni,nj,3)) to unstructured:
        dims/quads : (n_quads,4) int64, CCW point indices per quad
        dims/pts   : (n_pts,3), flattened point coordinates

    Must be run in serial (raises in MPI mode). Existing dims/quads and
    dims/pts datasets are replaced.
    '''

    verbose    = kwargs.get( 'verbose'    , True   )
    chunk_kb   = kwargs.get( 'chunk_kb'   , 1*1024 ) ## 1 [MB]
    chunk_base = kwargs.get( 'chunk_base' , 2      )

    if self.usingmpi:
        raise ValueError('spd.gen_unstruct_xyz() should not be run in MPI mode')

    if ('dims/xyz' not in self):
        raise ValueError('dims/xyz not in file')

    if verbose: print('\n'+'spd.gen_unstruct_xyz()'+'\n'+72*'-')
    t_start_func = timeit.default_timer()

    xyz = np.copy( self['dims/xyz'][()] )
    if verbose:
        even_print('dims/xyz',str(xyz.shape))

    if (xyz.ndim!=3):
        raise ValueError('xyz.ndim!=3')

    ni,nj,three = xyz.shape

    n_quads = (ni-1)*(nj-1)
    n_pts   = ni*nj
    nt      = self.nt

    if verbose:
        even_print('ni'      , f'{ni:d}'      )
        even_print('nj'      , f'{nj:d}'      )
        even_print('n_quads' , f'{n_quads:d}' )
        even_print('n_pts'   , f'{n_pts:d}'   )
        even_print('nt'      , f'{nt:d}'      )

    # ===

    ## index of structured node (i,j) in the flattened (C-order) pts array: i*nj+j
    ## (equivalent to the previous meshgrid('xy') + ravel_multi_index('F') construction)
    inds = np.arange(ni*nj, dtype=np.int64).reshape(ni,nj)

    ## quad connectivity, Counter-Clockwise (CCW)
    ## vectorized slicing replaces the former O(ni*nj) python double loop
    quads = np.empty((ni-1,nj-1,4), dtype=np.int64)
    quads[:,:,0] = inds[:-1 , :-1 ] ## (i   , j  )
    quads[:,:,1] = inds[1:  , :-1 ] ## (i+1 , j  )
    quads[:,:,2] = inds[1:  , 1:  ] ## (i+1 , j+1)
    quads[:,:,3] = inds[:-1 , 1:  ] ## (i   , j+1)

    ## Clockwise (CW) would be:
    #quads[:,:,0] = inds[:-1 , :-1 ]
    #quads[:,:,1] = inds[:-1 , 1:  ]
    #quads[:,:,2] = inds[1:  , 1:  ]
    #quads[:,:,3] = inds[1:  , :-1 ]

    # ===

    ## 'xy' indexing --> C-order flattening ('ij' would require order='F')
    order = 'C'

    # === dims/quads

    ## flatten quad index vector
    quads = np.reshape(quads, ((ni-1)*(nj-1),4), order=order)

    dsn = 'dims/quads'
    if (dsn in self):
        del self[dsn]

    shape    = quads.shape
    dtype    = quads.dtype
    itemsize = quads.dtype.itemsize
    chunks   = h5_chunk_sizer(nxi=shape, constraint=(None,None), size_kb=chunk_kb, base=chunk_base, itemsize=itemsize)
    ds = self.create_dataset(
        dsn,
        shape=shape,
        chunks=chunks,
        dtype=dtype,
        )

    chunk_kb_ = np.prod(ds.chunks)*itemsize / 1024. ## actual
    if verbose:
        even_print('chunk shape (n_quads,4)','%s'%str(ds.chunks))
        even_print('chunk size','%i [KB]'%int(round(chunk_kb_)))

    if verbose: even_print(dsn,'%s'%str(ds.shape))

    ## write
    ds[:,:] = quads

    # === dims/pts

    ## flatten point coordinate vector
    pts = np.reshape(xyz, (ni*nj,3), order=order)

    dsn = 'dims/pts'
    if (dsn in self):
        del self[dsn]

    shape    = pts.shape
    dtype    = pts.dtype
    itemsize = pts.dtype.itemsize
    chunks   = h5_chunk_sizer(nxi=shape, constraint=(None,None), size_kb=chunk_kb, base=chunk_base, itemsize=itemsize)
    ds = self.create_dataset(
        dsn,
        shape=shape,
        chunks=chunks,
        dtype=dtype,
        )

    chunk_kb_ = np.prod(ds.chunks)*itemsize / 1024. ## actual
    if verbose:
        even_print('chunk shape (n_pts,3)','%s'%str(ds.chunks))
        even_print('chunk size','%i [KB]'%int(round(chunk_kb_)))

    if verbose: even_print(dsn,'%s'%str(ds.shape))

    ## write
    ds[:,:] = pts

    if verbose: print(72*'-')
    if verbose: print('total time : spd.gen_unstruct_xyz() : %s'%format_time_string((timeit.default_timer() - t_start_func)))
    if verbose: print(72*'-')
    return
|
|
681
|
+
|
|
682
|
+
@staticmethod
def copy(fn_spd_src, fn_spd_tgt, **kwargs):
    '''
    copy header info, selected scalars, and [i,j,t] range to new SPD file
    - currently copies complete [i,j] range
    - if [i,j] range clipping were to be implemented, taking data_unstruct would be difficult
    --> this currently does NOT work in serial mode
    
    Parameters (kwargs)
    -------------------
    ri, rj : int
        number of MPI ranks in [i] and [j] ; ri*rj must equal the communicator size
    ti_min, ti_max : int or None
        inclusive time index clip range into 'dims/t' (negative indices allowed)
    ct : int
        number of 'chunks' in time for the read/write loop
    force : bool
        overwrite target file if it exists
    chunk_kb, chunk_constraint, chunk_base
        HDF5 chunk sizing parameters, passed to h5_chunk_sizer()
    stripe_count, stripe_size_mb
        Lustre striping parameters for initializing the target SPD file
    '''
    
    #comm    = MPI.COMM_WORLD
    rank    = MPI.COMM_WORLD.Get_rank()
    n_ranks = MPI.COMM_WORLD.Get_size()
    
    ## only rank 0 reports
    verbose = (rank==0)
    
    if verbose: print('\n'+'spd.copy()'+'\n'+72*'-')
    t_start_func = timeit.default_timer()
    
    ## rx/ry/rz are accepted only to give a clear error for rgd-style calls
    rx = kwargs.get('rx',None)
    ry = kwargs.get('ry',None)
    rz = kwargs.get('rz',None)
    
    ri = kwargs.get('ri',1) ## n ranks in [i]
    rj = kwargs.get('rj',1) ## n ranks in [j]
    
    rt = kwargs.get('rt',1) ## n ranks in [t] : must be 1
    force = kwargs.get('force',False) ## overwrite or raise error if exists
    
    ## time index clip range (was read twice from kwargs; read once)
    ti_min = kwargs.get('ti_min',None)
    ti_max = kwargs.get('ti_max',None)
    #scalars = kwargs.get('scalars',None)
    
    ## [i,j] clip range: declared but not yet implemented (rejected below)
    i_min = kwargs.get( 'i_min' , None )
    i_max = kwargs.get( 'i_max' , None )
    j_min = kwargs.get( 'j_min' , None )
    j_max = kwargs.get( 'j_max' , None )
    
    ct = kwargs.get('ct',1) ## 'chunks' in time
    
    chunk_kb = kwargs.get('chunk_kb',2*1024) ## h5 chunk size: default 2 [MB]
    chunk_constraint = kwargs.get('chunk_constraint',(None,None,1)) ## the 'constraint' parameter for sizing h5 chunks (i,j,t)
    chunk_base = kwargs.get('chunk_base',2)
    
    stripe_count   = kwargs.pop('stripe_count' , 16 ) ## for initializing SPD file
    stripe_size_mb = kwargs.pop('stripe_size_mb' , 2 )
    
    if (rx is not None):
        raise ValueError('rx not a valid option for spd.copy(). accepted are: ri,rj')
    if (ry is not None):
        raise ValueError('ry not a valid option for spd.copy(). accepted are: ri,rj')
    if (rz is not None):
        raise ValueError('rz not a valid option for spd.copy(). accepted are: ri,rj')
    
    if (i_min is not None):
        raise NotImplementedError('i/j_min/max not yet supported')
    if (i_max is not None):
        raise NotImplementedError('i/j_min/max not yet supported')
    if (j_min is not None):
        raise NotImplementedError('i/j_min/max not yet supported')
    if (j_max is not None):
        raise NotImplementedError('i/j_min/max not yet supported')
    
    if (rt!=1):
        raise AssertionError('rt!=1')
    if (ri*rj!=n_ranks):
        raise AssertionError('ri*rj!=n_ranks')
    if not os.path.isfile(fn_spd_src):
        raise FileNotFoundError('%s not found!'%fn_spd_src)
    if os.path.isfile(fn_spd_tgt) and not force:
        raise FileExistsError('%s already exists. delete it or use \'force=True\' kwarg'%fn_spd_tgt)
    
    # ===
    
    with spd(fn_spd_src, 'r', comm=MPI.COMM_WORLD, driver='mpio') as hf_src:
        with spd(fn_spd_tgt, 'w',
                 force=force,
                 comm=MPI.COMM_WORLD,
                 driver='mpio',
                 stripe_count=stripe_count,
                 stripe_size_mb=stripe_size_mb) as hf_tgt:
            
            ni = hf_src.ni
            nj = hf_src.nj
            #n_quads = hf_src.n_quads
            #n_pts = hf_src.n_pts
            nt = hf_src.nt
            
            ## report info from source file
            fsize = os.path.getsize(hf_src.fname)/1024**3
            if verbose: even_print(os.path.basename(hf_src.fname),'%0.1f [GB]'%fsize)
            if verbose: even_print('ni','%i'%hf_src.ni)
            if verbose: even_print('nj','%i'%hf_src.nj)
            if verbose: even_print('nt','%i'%hf_src.nt)
            if verbose: even_print('n_quads','%i'%hf_src.n_quads)
            if verbose: even_print('n_pts','%i'%hf_src.n_pts)
            if verbose: print(72*'-')
            
            # ===
            
            ## get OUTPUT times
            t_  = np.copy(hf_src['dims/t'][()])
            ti_ = np.arange(t_.shape[0], dtype=np.int32)
            if (ti_min is None):
                ti_min = ti_.min()
            if (ti_max is None):
                ti_max = ti_.max()
            ti = np.copy(ti_[ti_min:ti_max+1])
            if (ti.shape[0]==0):
                raise ValueError('ti_min/ti_max combo yields no times')
            ti1 = ti.min()   ## absolute start index into SOURCE time dim
            ti2 = ti.max()+1 ## absolute end index (exclusive)
            t  = np.copy(t_[ti1:ti2])
            nt = t.shape[0]  ## nt of TARGET file (clipped range)
            
            if (ti_min<0):
                if verbose: even_print('ti_min', f'{ti_min:d} / {ti1:d}')
            else:
                if verbose: even_print('ti_min', f'{ti_min:d}')
            
            if (ti_max<0):
                if verbose: even_print('ti_max', f'{ti_max:d} / {ti2:d}')
            else:
                if verbose: even_print('ti_max', f'{ti_max:d}')
            
            if verbose: even_print('t range', f'{ti.shape[0]:d}/{ti_.shape[0]:d}')
            
            # ===
            
            ## time chunk ranges : [start,end) pairs in absolute SOURCE indices
            if (ct>nt):
                raise ValueError('ct>nt')
            
            tfi  = np.arange(ti1,ti2,dtype=np.int64)
            ctl_ = np.array_split(tfi,ct)
            ctl  = [[b[0],b[-1]+1] for b in ctl_ ]
            
            if verbose: print(72*'-')
            
            # ===
            
            ## copy over attributes
            for key,val in hf_src.attrs.items():
                hf_tgt.attrs[key] = val
            
            # === get rank distribution over (i,j) dims
            
            comm2d = hf_src.comm.Create_cart(dims=[ri,rj], periods=[False,False], reorder=False)
            t2d = comm2d.Get_coords(rank)
            
            ril_ = np.array_split(np.arange(hf_src.ni,dtype=np.int64),ri)
            rjl_ = np.array_split(np.arange(hf_src.nj,dtype=np.int64),rj)
            
            ril = [[b[0],b[-1]+1] for b in ril_ ]
            rjl = [[b[0],b[-1]+1] for b in rjl_ ]
            
            ## this rank's [i,j] index sub-range
            ri1, ri2 = ril[t2d[0]] #; nir = ri2 - ri1
            rj1, rj2 = rjl[t2d[1]] #; njr = rj2 - rj1
            
            # === copy over non-attribute metadata
            
            ## 'dims/xyz' : 3D polydata grid coordinates : shape=(ni,nj,3)
            dsn = 'dims/xyz'
            dset = hf_src[dsn]
            dtype = dset.dtype
            float_bytes = dtype.itemsize
            with dset.collective:
                xyz = np.copy( dset[ri1:ri2,rj1:rj2,:] )
            shape = (ni,nj,3)
            chunks = h5_chunk_sizer(nxi=shape, constraint=(None,None,3), size_kb=chunk_kb, base=4, itemsize=float_bytes)
            data_gb = float_bytes * ni * nj / 1024**3
            if verbose:
                even_print(f'initializing {dsn}','%0.1f [GB]'%(data_gb,))
            dset = hf_tgt.create_dataset(dsn, dtype=xyz.dtype, shape=shape, chunks=chunks)
            chunk_kb_ = np.prod(dset.chunks)*float_bytes / 1024. ## actual
            if verbose:
                even_print('chunk shape (i,j,3)',str(dset.chunks))
                even_print('chunk size','%i [KB]'%int(round(chunk_kb_)))
            with dset.collective:
                dset[ri1:ri2,rj1:rj2,:] = xyz
            
            if verbose: print(72*'-')
            
            ## copy over [t]
            dsn = 'dims/t'
            ds = hf_tgt.create_dataset(dsn, chunks=None, data=t)
            if verbose: even_print(dsn,str(ds.shape))
            hf_tgt.attrs['nt'] = t.shape[0]
            
            ## copy over additional [dims/<>] dsets
            for dsn in [ 'dims/stang', 'dims/snorm', 'dims/crv_R', 'dims/x', 'dims/y', 'dims/z' ]:
                if (dsn in hf_src):
                    data = np.copy(hf_src[dsn][()])
                    ds = hf_tgt.create_dataset(dsn, data=data, chunks=None)
                    if verbose: even_print(dsn,str(ds.shape))
                else:
                    if verbose: even_print(dsn,'not found')
            
            ## copy over additional [csys/<>] dsets
            for dsn in [ 'csys/vtang', 'csys/vnorm' ]:
                if (dsn in hf_src):
                    data = np.copy(hf_src[dsn][()])
                    ds = hf_tgt.create_dataset(dsn, data=data, chunks=None)
                    if verbose: even_print(dsn,str(ds.shape))
                else:
                    if verbose: even_print(dsn,'not found')
            
            if verbose: print(72*'-')
            hf_tgt.get_header(verbose=verbose)
            if verbose: print(72*'-')
            
            # === initialize datasets in target file
            
            for scalar in hf_src.scalars:
                
                dsn = f'data/{scalar}'
                ds = hf_src[dsn]
                dtype = ds.dtype
                float_bytes = dtype.itemsize
                
                data_gb = ni * nj * nt * float_bytes / 1024**3
                shape = (ni,nj,nt)
                chunks = h5_chunk_sizer(nxi=shape, constraint=chunk_constraint, size_kb=chunk_kb, base=chunk_base, itemsize=float_bytes)
                
                if verbose:
                    even_print(f'initializing data/{scalar}','%0.1f [GB]'%(data_gb,))
                if (dsn in hf_tgt):
                    del hf_tgt[dsn]
                dset = hf_tgt.create_dataset(
                    dsn,
                    shape=shape,
                    dtype=dtype,
                    chunks=chunks,
                    )
                
                ## fix: use actual itemsize (was hardcoded *4, wrong for float64 data)
                chunk_kb_ = np.prod(dset.chunks)*float_bytes / 1024. ## actual
                if verbose:
                    even_print('chunk shape (i,j,t)','%s'%str(dset.chunks))
                    even_print('chunk size','%i [KB]'%int(round(chunk_kb_)))
            
            if verbose: print(72*'-')
            
            # === main loop
            
            data_gb_read  = 0.
            data_gb_write = 0.
            t_read  = 0.
            t_write = 0.
            
            if verbose:
                progress_bar = tqdm(
                    total=len(ctl)*hf_src.n_scalars,
                    ncols=100,
                    desc='copy',
                    leave=True,
                    file=sys.stdout,
                    mininterval=0.1,
                    smoothing=0.,
                    #bar_format="\033[B{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}\033[A\n\b",
                    bar_format="{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}",
                    ascii="░█",
                    colour='#FF6600',
                    )
            
            for scalar in hf_src.scalars:
                dset_src = hf_src[f'data/{scalar}']
                dset_tgt = hf_tgt[f'data/{scalar}']
                
                dtype = dset_src.dtype
                float_bytes = dtype.itemsize
                
                ## tc1/tc2 : absolute SOURCE time indices of this chunk
                ## (renamed from 'ctl_' which shadowed the array_split result above)
                for tc1,tc2 in ctl:
                    ntc = tc2 - tc1
                    
                    ## read
                    hf_src.comm.Barrier()
                    t_start = timeit.default_timer()
                    with dset_src.collective:
                        data = np.copy( dset_src[ri1:ri2,rj1:rj2,tc1:tc2] )
                    hf_src.comm.Barrier()
                    t_delta = timeit.default_timer() - t_start
                    data_gb = float_bytes * ni * nj * ntc / 1024**3
                    
                    t_read += t_delta
                    data_gb_read += data_gb
                    
                    if verbose:
                        tqdm.write(even_print(f'read: {scalar}', '%0.3f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb,t_delta,(data_gb/t_delta)), s=True))
                    
                    ## write
                    ## fix: the target time dim spans only the clipped range, so the chunk
                    ## must be offset by ti1 -- was [:], which raises a shape mismatch for ct>1
                    hf_tgt.comm.Barrier()
                    t_start = timeit.default_timer()
                    with dset_tgt.collective:
                        dset_tgt[ri1:ri2,rj1:rj2,tc1-ti1:tc2-ti1] = data
                    hf_tgt.comm.Barrier()
                    t_delta = timeit.default_timer() - t_start
                    data_gb = float_bytes * ni * nj * ntc / 1024**3
                    
                    t_write += t_delta
                    data_gb_write += data_gb
                    
                    if verbose:
                        tqdm.write(even_print(f'write: {scalar}', '%0.3f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb,t_delta,(data_gb/t_delta)), s=True))
                    
                    if verbose: progress_bar.update()
            
            if verbose: progress_bar.close()
    
    if verbose: print(72*'-')
    if verbose: print('total time : spd.copy() : %s'%format_time_string((timeit.default_timer() - t_start_func)))
    if verbose: print(72*'-')
    return
|
|
1003
|
+
|
|
1004
|
+
def make_xdmf(self, **kwargs):
    '''
    generate an XDMF/XMF2 from SPD for processing with Paraview
    -----
    --> https://www.xdmf.org/index.php/XDMF_Model_and_Format
    
    Writes a sidecar <fname_root>.xmf2 file next to the SPD/HDF5 file,
    describing the unstructured quad topology ('dims/quads'), the point
    coordinates ('dims/pts') and, per timestep, one HyperSlab reference
    into each 'data/<scalar>' dataset. Only rank 0 writes the file.
    '''
    
    ## only rank 0 reports (and, below, writes the XDMF file)
    if (self.rank==0):
        verbose = True
    else:
        verbose = False
    
    ## XDMF file gets the same path/stem as the SPD file, extension '.xmf2'
    fname_path = os.path.dirname(self.fname)
    fname_base = os.path.basename(self.fname)
    fname_root, fname_ext = os.path.splitext(fname_base)
    fname_xdmf_base = fname_root+'.xmf2'
    fname_xdmf = os.path.join(fname_path, fname_xdmf_base)
    
    ## the unstructured connectivity datasets are required
    if 'dims/quads' not in self:
        raise ValueError('dims/quads not in file')
    if 'dims/pts' not in self:
        raise ValueError('dims/pts not in file')
    
    ## this should be added to spd.get_header()
    dsn = 'dims/quads'
    n_quads,four = self[dsn].shape
    dsn = 'dims/pts'
    n_pts,three = self[dsn].shape
    self.n_quads = n_quads
    self.n_pts = n_pts
    
    if verbose: print('\n'+'spd.make_xdmf()'+'\n'+72*'-')
    
    dataset_precision_dict = {} ## holds dtype.itemsize ints i.e. 4,8
    dataset_numbertype_dict = {} ## holds string description of dtypes i.e. 'Float','Integer'
    
    # === 1D coordinate dimension vectors --> get dtype.name
    ## map h5 dtypes of pts/quads to XDMF NumberType + Precision
    for dsn in ['pts','quads']:
        if (f'dims/{dsn}' in self):
            data = self[f'dims/{dsn}']
            dataset_precision_dict[dsn] = data.dtype.itemsize
            if (data.dtype.name=='float32') or (data.dtype.name=='float64'):
                dataset_numbertype_dict[dsn] = 'Float'
            elif (data.dtype.name=='int8') or (data.dtype.name=='int16') or (data.dtype.name=='int32') or (data.dtype.name=='int64'):
                dataset_numbertype_dict[dsn] = 'Integer'
            else:
                raise ValueError('dtype not recognized, please update script accordingly')
    
    ## refresh header
    self.get_header(verbose=False)
    
    ## map h5 dtypes of each scalar to XDMF NumberType + Precision
    for scalar in self.scalars:
        data = self['data/%s'%scalar]
        
        dataset_precision_dict[scalar] = data.dtype.itemsize
        ## fixed-width report line: itemsize, dtype name, byte order
        txt = '%s%s%s%s%s'%(data.dtype.itemsize, ' '*(4-len(str(data.dtype.itemsize))), data.dtype.name, ' '*(10-len(str(data.dtype.name))), data.dtype.byteorder)
        if verbose: even_print(scalar, txt)
        
        if (data.dtype.name=='float32') or (data.dtype.name=='float64'):
            dataset_numbertype_dict[scalar] = 'Float'
        elif (data.dtype.name=='int8') or (data.dtype.name=='int16') or (data.dtype.name=='int32') or (data.dtype.name=='int64'):
            dataset_numbertype_dict[scalar] = 'Integer'
        else:
            raise TypeError('dtype not recognized, please update script accordingly')
    
    if verbose: print(72*'-')
    
    # === write to .xdmf/.xmf2 file
    if (self.rank==0):
        
        if not os.path.isfile(fname_xdmf): ## if doesnt exist...
            Path(fname_xdmf).touch() ## touch XDMF file
            perms_h5 = oct(os.stat(self.fname).st_mode)[-3:] ## get permissions of SPD file
            os.chmod(fname_xdmf, int(perms_h5, base=8)) ## change permissions of XDMF file
        
        ## newline='\n' forces Unix line endings regardless of platform
        #with open(fname_xdmf,'w') as xdmf:
        with io.open(fname_xdmf,'w',newline='\n') as xdmf:
            
            ## XML prolog + root <Xdmf>/<Domain> elements
            xdmf_str='''
            <?xml version="1.0" encoding="utf-8"?>
            <!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>
            <Xdmf xmlns:xi="http://www.w3.org/2001/XInclude" Version="2.0">
            <Domain>
            '''
            
            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 0*' '))
            
            ## shared Topology (quad connectivity) + Geometry (point coords),
            ## referenced by every timestep <Grid> below
            xdmf_str=f'''
            <Topology TopologyType="Quadrilateral" NumberOfElements="{n_quads:d}">
            <DataItem Dimensions="{n_quads:d} 4" NumberType="{dataset_numbertype_dict['quads']}" Precision="{dataset_precision_dict['quads']}" Format="HDF">
            {self.fname_base}:/dims/quads
            </DataItem>
            </Topology>
            <Geometry GeometryType="XYZ">
            <DataItem Dimensions="{n_pts:d} 3" NumberType="{dataset_numbertype_dict['pts']}" Precision="{dataset_precision_dict['pts']}" Format="HDF">
            {self.fname_base}:/dims/pts
            </DataItem>
            </Geometry>
            '''
            
            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 4*' '))
            
            # ===
            
            xdmf_str='''
            <!-- ==================== time series ==================== -->
            '''
            
            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 4*' '))
            
            # === the time series
            
            ## temporal collection: one <Grid> child per timestep
            xdmf_str='''
            <Grid Name="TimeSeries" GridType="Collection" CollectionType="Temporal">
            '''
            
            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 4*' '))
            
            for ti in range(len(self.t)):
                
                dset_name = 'ts_%08d'%ti
                
                xdmf_str = f'''
                <!-- ==================== ts = {ti:d} ==================== -->
                '''
                
                xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 6*' '))
                
                # ===
                
                ## per-timestep grid re-uses the shared Topology/Geometry via XPath refs
                xdmf_str=f'''
                <Grid Name="{dset_name}" GridType="Uniform">
                <Time TimeType="Single" Value="{self.t[ti]:0.8E}"/>
                <Topology Reference="/Xdmf/Domain/Topology[1]" />
                <Geometry Reference="/Xdmf/Domain/Geometry[1]" />
                '''
                
                xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 6*' '))
                
                # === .xdmf : <Grid> per scalar
                
                for scalar in self.scalars:
                    dset_hf_path = f'data/{scalar}'
                    scalar_name = scalar
                    
                    ## HyperSlab selects the [ni,nj] slab at time index ti out of the
                    ## (ni,nj,nt) dataset: 3x3 inner DataItem = start/stride/count rows
                    xdmf_str=f'''
                    <!-- {scalar} -->
                    <Attribute Name="{scalar_name}" AttributeType="Scalar" Center="Node">
                    <DataItem ItemType="HyperSlab" Dimensions="{self.ni:d} {self.nj:d}" Type="HyperSlab">
                    <DataItem Dimensions="3 3" NumberType="Integer" Format="XML">
                    {0:<9d} {0:<9d} {ti:d}
                    {1:<9d} {1:<9d} {1:d}
                    {self.ni:<9d} {self.nj:<9d} {1:d}
                    </DataItem>
                    <DataItem Dimensions="{self.ni:d} {self.nj:d} {self.nt:d}" NumberType="{dataset_numbertype_dict[scalar]}" Precision="{dataset_precision_dict[scalar]:d}" Format="HDF">
                    {fname_base}:/{dset_hf_path}
                    </DataItem>
                    </DataItem>
                    </Attribute>
                    '''
                    
                    xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 8*' '))
                
                # === .xdmf : end Grid for this timestep
                
                xdmf_str='''
                </Grid>
                '''
                xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 6*' '))
            
            # ===
            
            ## close collection / domain / root
            xdmf_str='''
            </Grid>
            </Domain>
            </Xdmf>
            '''
            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 0*' '))
    
    if verbose: print('--w-> %s'%fname_xdmf_base)
    return
|
|
1185
|
+
|
|
1186
|
+
# ==================================================================
|
|
1187
|
+
# External attachments
|
|
1188
|
+
# ==================================================================
|
|
1189
|
+
|
|
1190
|
+
def init_from_eas4_wall(self, fn_eas4, **kwargs):
    '''
    Thin wrapper: delegate to the externally-defined _init_from_eas4_wall()
    (imported at module level), passing this SPD instance, the EAS4 filename
    and all kwargs through unchanged.
    '''
    return _init_from_eas4_wall(self, fn_eas4, **kwargs)
|
|
1192
|
+
|
|
1193
|
+
def import_eas4_wall(self, fn_eas4_list, **kwargs):
    '''
    Thin wrapper: delegate to the externally-defined _import_eas4_wall(),
    passing this SPD instance, the list of EAS4 filenames and all kwargs
    through unchanged.
    '''
    return _import_eas4_wall(self, fn_eas4_list, **kwargs)
|
|
1195
|
+
|
|
1196
|
+
def calc_turb_cospectrum_wall(self, **kwargs):
    '''
    Thin wrapper: delegate to the externally-defined
    _calc_turb_cospectrum_wall(), passing this SPD instance and all kwargs
    through unchanged.
    '''
    return _calc_turb_cospectrum_wall(self, **kwargs)
|
|
1198
|
+
|
|
1199
|
+
def calc_ccor_wall(self, **kwargs):
    '''
    Thin wrapper: delegate to the externally-defined _calc_ccor_wall(),
    passing this SPD instance and all kwargs through unchanged.
    '''
    return _calc_ccor_wall(self, **kwargs)
|
|
1201
|
+
|
|
1202
|
+
def calc_statistics_wall(self, **kwargs):
    '''
    Thin wrapper: delegate to the externally-defined _calc_statistics_wall(),
    passing this SPD instance and all kwargs through unchanged.
    '''
    return _calc_statistics_wall(self, **kwargs)
|
|
1204
|
+
|
|
1205
|
+
def calc_mean_uncertainty_BMBC(self, **kwargs):
    '''
    Thin wrapper: delegate to the externally-defined
    _calc_mean_uncertainty_BMBC(), passing this SPD instance and all kwargs
    through unchanged.
    '''
    return _calc_mean_uncertainty_BMBC(self, **kwargs)
|