turbx 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- turbx/__init__.py +52 -0
- turbx/bl.py +620 -0
- turbx/blasius.py +64 -0
- turbx/cli.py +19 -0
- turbx/composite_profile.py +243 -0
- turbx/confidence_interval.py +64 -0
- turbx/eas3.py +420 -0
- turbx/eas4.py +567 -0
- turbx/fig_ax_constructor.py +52 -0
- turbx/freestream_parameters.py +268 -0
- turbx/gradient.py +391 -0
- turbx/grid_metric.py +272 -0
- turbx/h5.py +236 -0
- turbx/mvp.py +385 -0
- turbx/rgd.py +2693 -0
- turbx/rgd_mean.py +523 -0
- turbx/rgd_testing.py +354 -0
- turbx/rgd_xpln_ccor.py +701 -0
- turbx/rgd_xpln_coh.py +992 -0
- turbx/rgd_xpln_mean_dim.py +336 -0
- turbx/rgd_xpln_spectrum.py +940 -0
- turbx/rgd_xpln_stats.py +738 -0
- turbx/rgd_xpln_turb_budget.py +1193 -0
- turbx/set_mpl_env.py +85 -0
- turbx/signal.py +277 -0
- turbx/spd.py +1206 -0
- turbx/spd_wall_ccor.py +629 -0
- turbx/spd_wall_ci.py +406 -0
- turbx/spd_wall_import.py +676 -0
- turbx/spd_wall_spectrum.py +638 -0
- turbx/spd_wall_stats.py +618 -0
- turbx/utils.py +84 -0
- turbx/ztmd.py +2224 -0
- turbx/ztmd_analysis.py +2337 -0
- turbx/ztmd_loader.py +56 -0
- turbx-1.0.2.dist-info/LICENSE +21 -0
- turbx-1.0.2.dist-info/METADATA +120 -0
- turbx-1.0.2.dist-info/RECORD +41 -0
- turbx-1.0.2.dist-info/WHEEL +5 -0
- turbx-1.0.2.dist-info/entry_points.txt +2 -0
- turbx-1.0.2.dist-info/top_level.txt +1 -0
turbx/rgd.py
ADDED
|
@@ -0,0 +1,2693 @@
|
|
|
1
|
+
import io
|
|
2
|
+
import os
|
|
3
|
+
import re
|
|
4
|
+
import shutil
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
import textwrap
|
|
8
|
+
import time
|
|
9
|
+
import timeit
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
import h5py
|
|
13
|
+
import numpy as np
|
|
14
|
+
from mpi4py import MPI
|
|
15
|
+
from tqdm import tqdm
|
|
16
|
+
|
|
17
|
+
from .eas4 import eas4
|
|
18
|
+
from .h5 import h5_chunk_sizer, h5_ds_force_allocate_chunks
|
|
19
|
+
from .rgd_mean import _calc_mean
|
|
20
|
+
from .rgd_xpln_ccor import _calc_ccor_xpln
|
|
21
|
+
from .rgd_xpln_coh import _calc_wall_coh_xpln
|
|
22
|
+
from .rgd_xpln_mean_dim import _add_mean_dimensional_data_xpln
|
|
23
|
+
from .rgd_xpln_spectrum import _calc_turb_cospectrum_xpln
|
|
24
|
+
from .rgd_xpln_stats import _calc_statistics_xpln
|
|
25
|
+
from .rgd_xpln_turb_budget import _calc_turb_budget_xpln
|
|
26
|
+
from .utils import even_print, format_time_string
|
|
27
|
+
|
|
28
|
+
# ======================================================================
|
|
29
|
+
|
|
30
|
+
class rgd(h5py.File):
|
|
31
|
+
'''
|
|
32
|
+
Rectilinear Grid Data (RGD)
|
|
33
|
+
---------------------------
|
|
34
|
+
- super()'ed h5py.File class
|
|
35
|
+
- 4D dataset storage
|
|
36
|
+
- dimension coordinates are 4x 1D arrays defining [x,y,z,t]
|
|
37
|
+
|
|
38
|
+
to clear:
|
|
39
|
+
---------
|
|
40
|
+
> os.system('h5clear -s tmp.h5')
|
|
41
|
+
> hf = h5py.File('tmp.h5', 'r')
|
|
42
|
+
> hf.close()
|
|
43
|
+
|
|
44
|
+
Structure
|
|
45
|
+
---------
|
|
46
|
+
|
|
47
|
+
rgd.h5
|
|
48
|
+
│
|
|
49
|
+
├── dims/ --> 1D
|
|
50
|
+
│ └── x
|
|
51
|
+
│ └── y
|
|
52
|
+
│ └── z
|
|
53
|
+
│ └── t
|
|
54
|
+
│
|
|
55
|
+
└── data/<<scalar>> --> 4D [t,z,y,x]
|
|
56
|
+
|
|
57
|
+
'''
|
|
58
|
+
|
|
59
|
+
def __init__(self, *args, **kwargs):
    '''
    Open (or create) an RGD HDF5 file.

    Positional args are forwarded to ``h5py.File``: ``(fname, open_mode)``.

    rgd-specific keyword arguments (consumed here, NOT passed to h5py.File):
      stripe_count   : int  -- Lustre stripe count used on file creation (default 16)
      stripe_size_mb : int  -- Lustre stripe size in [MB] on file creation (default 2)
      perms          : str  -- 3-digit octal permission string, e.g. '640' (default)
      no_indep_rw    : bool -- if True, set ROMIO hint 'romio_no_indep_rw' (force collective I/O)
      verbose        : bool -- passed on to get_header() (default False)
      force          : bool -- allow overwriting an existing file with mode 'w' (default False)

    Raises
    ------
    ValueError        : on bad kwarg types, '.eas' extension, or inconsistent comm/driver combos
    FileExistsError   : mode 'w' on an existing file without force=True
    '''

    ## file name bookkeeping: full path, directory, basename, root & extension
    self.fname, self.open_mode = args

    self.fname_path = os.path.dirname(self.fname)
    self.fname_base = os.path.basename(self.fname)
    self.fname_root, self.fname_ext = os.path.splitext(self.fname_base)

    ## default to libver='latest' if none provided
    if ('libver' not in kwargs):
        kwargs['libver'] = 'latest'

    ## catch possible user error --> could prevent accidental EAS overwrites
    if (self.fname_ext=='.eas'):
        raise ValueError('EAS4 files should not be opened with turbx.rgd()')

    ## check if non-None communicator, but no driver='mpio' --> inconsistent combination
    if ('comm' in kwargs) and (kwargs['comm'] is not None) and ('driver' not in kwargs):
        raise ValueError("comm is provided as not None, but driver='mpio' not provided")

    ## determine if using MPI: only when driver='mpio' is explicitly requested
    if ('driver' in kwargs) and (kwargs['driver']=='mpio'):
        if ('comm' not in kwargs):
            raise ValueError("if driver='mpio', then comm should be provided")
        self.usingmpi = True
    else:
        self.usingmpi = False

    ## determine communicator & rank info
    ## serial fallback: rank 0 of 1, and any (None) 'comm' kwarg is dropped
    if self.usingmpi:
        self.comm    = kwargs['comm']
        self.n_ranks = self.comm.Get_size()
        self.rank    = self.comm.Get_rank()
    else:
        self.comm    = None
        self.n_ranks = 1
        self.rank    = 0
        if ('comm' in kwargs):
            del kwargs['comm']

    ## rgd() unique kwargs (not h5py.File kwargs) --> pop() rather than get()
    stripe_count   = kwargs.pop('stripe_count'   , 16    )
    stripe_size_mb = kwargs.pop('stripe_size_mb' , 2     )
    perms          = kwargs.pop('perms'          , '640' )
    no_indep_rw    = kwargs.pop('no_indep_rw'    , False )

    if not isinstance(stripe_count, int):
        raise ValueError('stripe_count must be int')
    if not isinstance(stripe_size_mb, int):
        raise ValueError('stripe_size_mb must be int')
    if not isinstance(perms, str) or len(perms)!=3 or not re.fullmatch(r'\d{3}',perms):
        raise ValueError("perms must be 3-digit string like '660'")

    ## if not using MPI, remove 'driver' and 'comm' from kwargs
    if ( not self.usingmpi ) and ('driver' in kwargs):
        kwargs.pop('driver')
    if ( not self.usingmpi ) and ('comm' in kwargs):
        kwargs.pop('comm')

    ## | mpiexec --mca io romio321 -n $NP python3 ...
    ## | mpiexec --mca io ompio   -n $NP python3 ...
    ## | ompi_info --> print ompi settings (grep 'MCA io' for I/O opts)
    ## | export ROMIO_FSTYPE_FORCE="lustre:" --> force Lustre driver over UFS when using ROMIO
    ## | export ROMIO_FSTYPE_FORCE="ufs:"
    ## | export ROMIO_PRINT_HINTS=1 --> show available hints
    ##
    ## https://doku.lrz.de/best-practices-hints-and-optimizations-for-io-10747318.html
    ##
    ## ## Using OMPIO
    ## export OMPI_MCA_sharedfp=^lockedfile,individual
    ## mpiexec --mca io ompio -n $NP python3 script.py
    ##
    ## ## Using Cray MPICH
    ## to print ROMIO hints : export MPICH_MPIIO_HINTS_DISPLAY=1

    ## set MPI-IO hints, passed through 'info' (MPI.Info) kwarg to h5py.File
    ## if the caller supplied an 'info' kwarg, it is used verbatim
    if self.usingmpi:
        if ('info' in kwargs):
            self.mpi_info = kwargs['info']
        else:
            mpi_info = MPI.Info.Create()

            ## ROMIO -- data sieving & collective buffering
            mpi_info.Set('romio_ds_write' , 'disable' ) ## ds = data sieving
            mpi_info.Set('romio_ds_read'  , 'disable' )
            mpi_info.Set('romio_cb_read'  , 'enable'  ) ## cb = collective buffering
            mpi_info.Set('romio_cb_write' , 'enable'  )

            ## ROMIO -- collective buffer size
            mpi_info.Set('cb_buffer_size' , str(int(round(1*1024**3))) ) ## 1 [GB]

            ## ROMIO -- force collective I/O (disable independent read/write)
            if no_indep_rw:
                mpi_info.Set('romio_no_indep_rw' , 'true' )

            ## ROMIO -- N aggregator nodes (capped at 16)
            mpi_info.Set('cb_nodes' , str(min(16,self.n_ranks)) )

            ## add to kwargs to be passed to h5py.File() at super() call
            kwargs['info'] = mpi_info
            self.mpi_info = mpi_info

    # === HDF5 tuning factors (independent of MPI I/O driver)

    ## rdcc_w0 : preemption policy (weight) for HDF5's raw data chunk cache
    ##  - influences how HDF5 evicts chunks from the per-process chunk cache
    ##  - 1.0 favors retaining fully-read chunks (good for read-heavy access)
    ##  - 0.0 favors recently-used chunks (better for partial writes)
    if ('rdcc_w0' not in kwargs):
        kwargs['rdcc_w0'] = 0.75

    ## rdcc_nbytes : maximum total size of the HDF5 raw chunk cache per dataset per process
    if ('rdcc_nbytes' not in kwargs):
        kwargs['rdcc_nbytes'] = int(1*1024**3) ## 1 [GB]

    ## rdcc_nslots : number of hash table slots in the raw data chunk cache
    ##  - should be ~= ( rdcc_nbytes / chunk size )
    if ('rdcc_nslots' not in kwargs):
        kwargs['rdcc_nslots'] = kwargs['rdcc_nbytes'] // (2*1024**2) ## assume 2 [MB] chunks

    ## rgd() unique kwargs (not h5py.File kwargs) --> pop() rather than get()
    verbose = kwargs.pop( 'verbose' , False )
    force   = kwargs.pop( 'force'   , False )

    if not isinstance(verbose, bool):
        raise ValueError
    if not isinstance(force, bool):
        raise ValueError

    # === initialize file on FS

    ## if file open mode is 'w', the file exists, and force is False
    ## --> raise error (rank 0 prints a help text describing open modes first)
    if (self.open_mode == 'w') and (force is False) and os.path.isfile(self.fname):
        if (self.rank==0):
            print('\n'+72*'-')
            print(self.fname+' already exists! opening with \'w\' would overwrite.\n')
            openModeInfoStr = '''
                              r       : Read only, file must exist
                              r+      : Read/write, file must exist
                              w       : Create file, truncate if exists
                              w- or x : Create file, fail if exists
                              a       : Read/write if exists, create otherwise

                              or use force=True arg:

                              >>> with rgd(<<fname>>,'w',force=True) as f:
                              >>>     ...
                              '''
            print(textwrap.indent(textwrap.dedent(openModeInfoStr), 2*' ').strip('\n'))
            print(72*'-'+'\n')
            sys.stdout.flush()

        ## all ranks raise (after a barrier so the rank-0 message is flushed first)
        if (self.comm is not None):
            self.comm.Barrier()
        raise FileExistsError

    ## if file open mode is 'w'
    ## --> <delete>, touch, chmod, stripe (rank 0 only; others wait at the barrier)
    if (self.open_mode == 'w'):
        if (self.rank==0):
            if os.path.isfile(self.fname): ## if the file exists, delete it
                os.remove(self.fname)
                time.sleep(1.)
            Path(self.fname).touch() ## touch a new file
            time.sleep(1.)
            os.chmod(self.fname, int(perms, base=8)) ## change permissions
            if shutil.which('lfs') is not None: ## set stripe if on Lustre
                ## NOTE(review): shell=True with an interpolated filename --
                ## fname containing shell metacharacters would break this; verify callers
                cmd_str_lfs_migrate = f'lfs migrate --stripe-count {stripe_count:d} --stripe-size {stripe_size_mb:d}M {self.fname} > /dev/null 2>&1'
                return_code = subprocess.call(cmd_str_lfs_migrate, shell=True)
                if (return_code != 0):
                    raise ValueError('lfs migrate failed')
                time.sleep(1.)

        if (self.comm is not None):
            self.comm.Barrier()

    ## call actual h5py.File.__init__(), then populate header attributes
    super(rgd, self).__init__(*args, **kwargs)
    self.get_header(verbose=verbose)
|
|
244
|
+
|
|
245
|
+
def get_header(self,**kwargs):
    '''
    initialize header attributes of RGD class instance
    --> this gets called automatically upon opening the file

    Reads (when present) from the HDF5 file:
      - top-level attrs : 'duration_avg', 'fclass', 'fsubtype', freestream params
      - legacy datasets : 'header/udef_real' + 'header/udef_char' (backward compat)
      - dimension vectors : 'dims/x','dims/y','dims/z','dims/t'
      - 1D grid filter index vectors : 'dims/[xyz]fi(R)'
      - scalar datasets under 'data/' (4D [t,z,y,x])

    Sets instance attributes: freestream/characteristic values (Ma, Re, ...,
    lchar, tchar), udef dict, grid (x,y,z,nx,ny,nz,ngp), time (t,ti,nt,dt,
    duration), and scalars bookkeeping.

    kwargs: verbose (bool, default True) -- only rank 0 ever prints
    '''

    verbose = kwargs.get('verbose',True)

    ## suppress output on all non-root ranks
    if (self.rank!=0):
        verbose=False

    # === attrs

    if ('duration_avg' in self.attrs.keys()):
        self.duration_avg = self.attrs['duration_avg']

    ## these should be set in the (init_from_() funcs)
    if ('fclass' in self.attrs.keys()):
        self.fclass = self.attrs['fclass'] ## 'rgd','cgd',...
    if ('fsubtype' in self.attrs.keys()):
        self.fsubtype = self.attrs['fsubtype'] ## 'unsteady','mean','prime',...

    # === udef

    ## primary freestream parameters (expected as top-level HDF5 attributes)
    header_attr_keys = [
        'Ma','Re','Pr',
        'kappa','R',
        'p_inf','T_inf',
        'S_Suth','mu_Suth_ref','T_Suth_ref',
        ]

    ## parameters derivable from the primary set (checked against file attrs if present)
    header_attr_keys_derived = [
        'C_Suth','mu_inf','rho_inf','nu_inf',
        'a_inf','U_inf',
        'cp','cv',
        'recov_fac','Taw',
        'lchar','tchar',
        'uchar','M_inf',
        ]

    ## if all primary FS params are top-level HDF5 attributes, then read & assign as instance attributes
    if all([ key in self.attrs for key in header_attr_keys ]):
        for key in header_attr_keys:
            setattr( self, key, self.attrs[key] )

        ## calculate derived freestream parameters and set them as instance attributes
        ## (Sutherland viscosity law, ideal gas relations)
        self.C_Suth    = self.mu_Suth_ref/(self.T_Suth_ref**(3/2))*(self.T_Suth_ref + self.S_Suth) ## [kg/(m·s·√K)]
        self.mu_inf    = self.mu_Suth_ref*(self.T_inf/self.T_Suth_ref)**(3/2) * ((self.T_Suth_ref+self.S_Suth)/(self.T_inf+self.S_Suth))
        self.rho_inf   = self.p_inf/(self.R*self.T_inf)
        self.nu_inf    = self.mu_inf/self.rho_inf
        self.a_inf     = np.sqrt(self.kappa*self.R*self.T_inf)
        self.U_inf     = self.Ma*self.a_inf
        self.cp        = self.R*self.kappa/(self.kappa-1.)
        self.cv        = self.cp/self.kappa
        self.recov_fac = self.Pr**(1/3)
        self.Taw       = self.T_inf + self.recov_fac*self.U_inf**2/(2*self.cp)
        self.lchar     = self.Re*self.nu_inf/self.U_inf
        self.tchar     = self.lchar / self.U_inf
        self.uchar     = self.U_inf
        self.M_inf     = self.Ma

        if verbose: even_print('Ma'          , '%0.2f [-]'           % self.Ma          )
        if verbose: even_print('Re'          , '%0.1f [-]'           % self.Re          )
        if verbose: even_print('Pr'          , '%0.3f [-]'           % self.Pr          )
        if verbose: even_print('T_inf'       , '%0.3f [K]'           % self.T_inf       )
        if verbose: even_print('p_inf'       , '%0.1f [Pa]'          % self.p_inf       )
        if verbose: even_print('kappa'       , '%0.3f [-]'           % self.kappa       )
        if verbose: even_print('R'           , '%0.3f [J/(kg·K)]'    % self.R           )
        if verbose: even_print('mu_Suth_ref' , '%0.6E [kg/(m·s)]'    % self.mu_Suth_ref )
        if verbose: even_print('T_Suth_ref'  , '%0.2f [K]'           % self.T_Suth_ref  )
        if verbose: even_print('S_Suth'      , '%0.2f [K]'           % self.S_Suth      )
        if verbose: even_print('C_Suth'      , '%0.5e [kg/(m·s·√K)]' % self.C_Suth      )

        if verbose: print(72*'-')
        if verbose: even_print('rho_inf' , '%0.3f [kg/m³]'    % self.rho_inf )
        if verbose: even_print('mu_inf'  , '%0.6E [kg/(m·s)]' % self.mu_inf  )
        if verbose: even_print('nu_inf'  , '%0.6E [m²/s]'     % self.nu_inf  )
        if verbose: even_print('a_inf'   , '%0.6f [m/s]'      % self.a_inf   )
        if verbose: even_print('U_inf'   , '%0.6f [m/s]'      % self.U_inf   )
        if verbose: even_print('cp'      , '%0.3f [J/(kg·K)]' % self.cp      )
        if verbose: even_print('cv'      , '%0.3f [J/(kg·K)]' % self.cv      )
        if verbose: even_print('Taw'     , '%0.3f [K]'        % self.Taw     )
        if verbose: even_print('lchar'   , '%0.6E [m]'        % self.lchar   )
        if verbose: even_print('tchar'   , '%0.6E [s]'        % self.tchar   )
        if verbose: print(72*'-')

        ## assert that the derived values are equal to any HDF5 top-level attributes
        for key in header_attr_keys_derived:
            if (key in self.attrs): ## if is in HDF5 as top-level attribute
                np.testing.assert_allclose( getattr(self,key), self.attrs[key], rtol=1e-10, atol=1e-10 )

        ## assign udef dict as instance attribute for convenience
        self.udef = {
            'Ma':self.Ma,
            'Re':self.Re,
            'Pr':self.Pr,
            'kappa':self.kappa,
            'R':self.R,
            'p_inf':self.p_inf,
            'T_inf':self.T_inf,
            'S_Suth':self.S_Suth,
            'mu_Suth_ref':self.mu_Suth_ref,
            'T_Suth_ref':self.T_Suth_ref,

            'C_Suth':self.C_Suth,
            'mu_inf':self.mu_inf,
            'rho_inf':self.rho_inf,
            'nu_inf':self.nu_inf,
            'a_inf':self.a_inf,
            'U_inf':self.U_inf,
            'cp':self.cp,
            'cv':self.cv,
            'recov_fac':self.recov_fac,
            'Taw':self.Taw,
            'lchar':self.lchar,
            'tchar':self.tchar,

            'uchar':self.uchar,
            'M_inf':self.M_inf,
            }

    # ===

    ## OLD WAY using udef real/char here for backward compatibility
    ## (legacy files store parameters as parallel datasets 'header/udef_real'
    ## and 'header/udef_char' instead of top-level attributes)
    if not all([ key in self.attrs for key in header_attr_keys ]) and ('header/udef_real' in self) and ('header/udef_char' in self):

        udef_real = np.copy(self['header/udef_real'][()])
        udef_char = np.copy(self['header/udef_char'][()]) ## the unpacked numpy array of |S128 encoded fixed-length character objects
        udef_char = [s.decode('utf-8') for s in udef_char] ## convert it to a python list of utf-8 strings
        self.udef = dict(zip(udef_char, udef_real)) ## make dict where keys are udef_char and values are udef_real

        # === characteristic values

        self.Ma          = self.udef['Ma']
        self.Re          = self.udef['Re']
        self.Pr          = self.udef['Pr']
        self.kappa       = self.udef['kappa']
        self.R           = self.udef['R']
        self.p_inf       = self.udef['p_inf']
        self.T_inf       = self.udef['T_inf']
        self.mu_Suth_ref = self.udef['mu_Suth_ref']
        self.T_Suth_ref  = self.udef['T_Suth_ref']
        self.S_Suth      = self.udef['S_Suth']

        self.C_Suth = self.mu_Suth_ref/(self.T_Suth_ref**(3/2))*(self.T_Suth_ref + self.S_Suth) ## [kg/(m·s·√K)]

        if verbose: even_print('Ma'          , '%0.2f [-]'           % self.Ma          )
        if verbose: even_print('Re'          , '%0.1f [-]'           % self.Re          )
        if verbose: even_print('Pr'          , '%0.3f [-]'           % self.Pr          )
        if verbose: even_print('T_inf'       , '%0.3f [K]'           % self.T_inf       )
        if verbose: even_print('p_inf'       , '%0.1f [Pa]'          % self.p_inf       )
        if verbose: even_print('kappa'       , '%0.3f [-]'           % self.kappa       )
        if verbose: even_print('R'           , '%0.3f [J/(kg·K)]'    % self.R           )
        if verbose: even_print('mu_Suth_ref' , '%0.6E [kg/(m·s)]'    % self.mu_Suth_ref )
        if verbose: even_print('T_Suth_ref'  , '%0.2f [K]'           % self.T_Suth_ref  )
        if verbose: even_print('S_Suth'      , '%0.2f [K]'           % self.S_Suth      )
        if verbose: even_print('C_Suth'      , '%0.5e [kg/(m·s·√K)]' % self.C_Suth      )

        # === characteristic values : derived
        ## (identical relations as the modern attribute-based branch above)

        self.mu_inf    = self.mu_Suth_ref*(self.T_inf/self.T_Suth_ref)**(3/2) * ((self.T_Suth_ref+self.S_Suth)/(self.T_inf+self.S_Suth))
        self.rho_inf   = self.p_inf/(self.R*self.T_inf)
        self.nu_inf    = self.mu_inf/self.rho_inf
        self.a_inf     = np.sqrt(self.kappa*self.R*self.T_inf)
        self.U_inf     = self.Ma*self.a_inf
        self.cp        = self.R*self.kappa/(self.kappa-1.)
        self.cv        = self.cp/self.kappa
        self.recov_fac = self.Pr**(1/3)
        self.Taw       = self.T_inf + self.recov_fac*self.U_inf**2/(2*self.cp)
        self.lchar     = self.Re*self.nu_inf/self.U_inf

        self.tchar = self.lchar / self.U_inf
        self.uchar = self.U_inf
        self.M_inf = self.Ma

        if verbose: print(72*'-')
        if verbose: even_print('rho_inf'         , '%0.3f [kg/m³]'    % self.rho_inf   )
        if verbose: even_print('mu_inf'          , '%0.6E [kg/(m·s)]' % self.mu_inf    )
        if verbose: even_print('nu_inf'          , '%0.6E [m²/s]'     % self.nu_inf    )
        if verbose: even_print('a_inf'           , '%0.6f [m/s]'      % self.a_inf     )
        if verbose: even_print('U_inf'           , '%0.6f [m/s]'      % self.U_inf     )
        if verbose: even_print('cp'              , '%0.3f [J/(kg·K)]' % self.cp        )
        if verbose: even_print('cv'              , '%0.3f [J/(kg·K)]' % self.cv        )
        if verbose: even_print('recovery factor' , '%0.6f [-]'        % self.recov_fac )
        if verbose: even_print('Taw'             , '%0.3f [K]'        % self.Taw       )
        if verbose: even_print('lchar'           , '%0.6E [m]'        % self.lchar     )
        if verbose: even_print('tchar'           , '%0.6E [s]'        % self.tchar     )
        if verbose: print(72*'-')

        ## assign udef dict as instance attribute for convenience
        self.udef = {
            'Ma':self.Ma,
            'Re':self.Re,
            'Pr':self.Pr,
            'kappa':self.kappa,
            'R':self.R,
            'p_inf':self.p_inf,
            'T_inf':self.T_inf,
            'S_Suth':self.S_Suth,
            'mu_Suth_ref':self.mu_Suth_ref,
            'T_Suth_ref':self.T_Suth_ref,

            'C_Suth':self.C_Suth,
            'mu_inf':self.mu_inf,
            'rho_inf':self.rho_inf,
            'nu_inf':self.nu_inf,
            'a_inf':self.a_inf,
            'U_inf':self.U_inf,
            'cp':self.cp,
            'cv':self.cv,
            'recov_fac':self.recov_fac,
            'Taw':self.Taw,
            'lchar':self.lchar,
            'tchar':self.tchar,

            'uchar':self.uchar,
            'M_inf':self.M_inf,
            }

    # === coordinate vectors

    if all([('dims/x' in self),('dims/y' in self),('dims/z' in self)]):

        x = self.x = np.copy(self['dims/x'][()])
        y = self.y = np.copy(self['dims/y'][()])
        z = self.z = np.copy(self['dims/z'][()])
        nx  = self.nx  = x.size
        ny  = self.ny  = y.size
        nz  = self.nz  = z.size
        ngp = self.ngp = nx*ny*nz ## total number of grid points

        if verbose: even_print('nx',  '%i'%nx  )
        if verbose: even_print('ny',  '%i'%ny  )
        if verbose: even_print('nz',  '%i'%nz  )
        if verbose: even_print('ngp', '%i'%ngp )
        if verbose: print(72*'-')

        if verbose: even_print('x_min', '%0.2f'%x.min())
        if verbose: even_print('x_max', '%0.2f'%x.max())
        if (self.nx>2): ## grid spacing only meaningful with >2 points
            if verbose: even_print('dx begin : end', '%0.3E : %0.3E'%( (x[1]-x[0]), (x[-1]-x[-2]) ))
        if verbose: even_print('y_min', '%0.2f'%y.min())
        if verbose: even_print('y_max', '%0.2f'%y.max())
        if (self.ny>2):
            if verbose: even_print('dy begin : end', '%0.3E : %0.3E'%( (y[1]-y[0]), (y[-1]-y[-2]) ))
        if verbose: even_print('z_min', '%0.2f'%z.min())
        if verbose: even_print('z_max', '%0.2f'%z.max())
        if (self.nz>2):
            if verbose: even_print('dz begin : end', '%0.3E : %0.3E'%( (z[1]-z[0]), (z[-1]-z[-2]) ))
        if verbose: print(72*'-')

    else:
        pass ## no coordinate datasets present (e.g. freshly created file)

    # === 1D grid filters
    ## 'dims/[xyz]fi' hold the source grid indices retained in this file;
    ## the file is considered "filtered" when those indices differ from 0..n-1
    ## NOTE(review): the comparison below uses local nx/ny/nz, which are only
    ## bound when 'dims/x','dims/y','dims/z' all exist -- presumably a filter
    ## dataset never appears without the coordinate datasets; verify writers

    self.hasGridFilter=False
    if ('dims/xfi' in self):
        self.xfi = np.copy(self['dims/xfi'][()])
        if not np.array_equal(self.xfi, np.arange(nx,dtype=np.int64)):
            self.hasGridFilter=True
    if ('dims/xfiR' in self):
        self.xfiR = np.copy(self['dims/xfiR'][()])
    if ('dims/yfi' in self):
        self.yfi = np.copy(self['dims/yfi'][()])
        if not np.array_equal(self.yfi, np.arange(ny,dtype=np.int64)):
            self.hasGridFilter=True
    if ('dims/yfiR' in self):
        self.yfiR = np.copy(self['dims/yfiR'][()])
    if ('dims/zfi' in self):
        self.zfi = np.copy(self['dims/zfi'][()])
        if not np.array_equal(self.zfi, np.arange(nz,dtype=np.int64)):
            self.hasGridFilter=True
    if ('dims/zfiR' in self):
        self.zfiR = np.copy(self['dims/zfiR'][()])

    # === time vector

    if ('dims/t' in self):
        self.t = np.copy(self['dims/t'][()])

        if ('data' in self): ## check t dim and data arr agree
            nt,_,_,_ = self['data/%s'%list(self['data'].keys())[0]].shape ## 4D
            if (nt!=self.t.size):
                raise AssertionError('nt!=self.t.size : %i!=%i'%(nt,self.t.size))

        nt = self.t.size

        ## dt from first interval; 0. for a single-snapshot file
        try:
            self.dt = self.t[1] - self.t[0]
        except IndexError:
            self.dt = 0.

        self.nt       = self.t.size
        self.duration = self.t[-1] - self.t[0]
        self.ti       = np.array(range(self.nt), dtype=np.int64)

    elif all([('data' in self),('dims/t' not in self)]): ## data but no time
        ## fabricate a unit-spaced time vector matching the data's t-extent
        self.scalars = list(self['data'].keys())
        nt,_,_,_ = self['data/%s'%self.scalars[0]].shape
        self.nt  = nt
        self.t   = np.arange(self.nt, dtype=np.float64)
        self.ti  = np.arange(self.nt, dtype=np.int64)
        self.dt  = 1.
        self.duration = self.t[-1]-self.t[0]

    else: ## no data, no time
        self.t  = np.array([], dtype=np.float64)
        self.ti = np.array([], dtype=np.int64)
        self.nt = nt = 0
        self.dt = 0.
        self.duration = 0.

    if verbose: even_print('nt', '%i'%self.nt )
    if verbose: even_print('dt', '%0.6f'%self.dt)
    if verbose: even_print('duration', '%0.2f'%self.duration )
    if hasattr(self, 'duration_avg'):
        if verbose: even_print('duration_avg', '%0.2f'%self.duration_avg )

    # === ts group names & scalars

    if ('data' in self):
        self.scalars = list(self['data'].keys()) ## 4D : string names of scalars : ['u','v','w'],...
        self.n_scalars = len(self.scalars)
        self.scalars_dtypes = []
        for scalar in self.scalars:
            self.scalars_dtypes.append(self[f'data/{scalar}'].dtype)
        self.scalars_dtypes_dict = dict(zip(self.scalars, self.scalars_dtypes)) ## dict {<<scalar>>: <<dtype>>}
    else:
        self.scalars = []
        self.n_scalars = 0
        self.scalars_dtypes = []
        self.scalars_dtypes_dict = dict(zip(self.scalars, self.scalars_dtypes))

    return
|
|
620
|
+
|
|
621
|
+
def init_from_eas4(self, fn_eas4, **kwargs):
|
|
622
|
+
'''
|
|
623
|
+
initialize an RGD from an EAS4 (NS3D output format)
|
|
624
|
+
-----
|
|
625
|
+
- x_min/max xi_min/max : min/max coord/index
|
|
626
|
+
- stride filters (sx,sy,sz)
|
|
627
|
+
'''
|
|
628
|
+
|
|
629
|
+
#EAS4_NO_G=1; EAS4_X0DX_G=2; EAS4_UDEF_G=3; EAS4_ALL_G=4; EAS4_FULL_G=5
|
|
630
|
+
gmode_dict = {1:'EAS4_NO_G', 2:'EAS4_X0DX_G', 3:'EAS4_UDEF_G', 4:'EAS4_ALL_G', 5:'EAS4_FULL_G'}
|
|
631
|
+
|
|
632
|
+
verbose = kwargs.get('verbose',True)
|
|
633
|
+
if (self.rank!=0):
|
|
634
|
+
verbose=False
|
|
635
|
+
|
|
636
|
+
## spatial resolution filter : take every nth grid point
|
|
637
|
+
sx = kwargs.get('sx',1)
|
|
638
|
+
sy = kwargs.get('sy',1)
|
|
639
|
+
sz = kwargs.get('sz',1)
|
|
640
|
+
|
|
641
|
+
## spatial resolution filter : set x/y/z bounds
|
|
642
|
+
x_min = kwargs.get('x_min',None)
|
|
643
|
+
y_min = kwargs.get('y_min',None)
|
|
644
|
+
z_min = kwargs.get('z_min',None)
|
|
645
|
+
|
|
646
|
+
x_max = kwargs.get('x_max',None)
|
|
647
|
+
y_max = kwargs.get('y_max',None)
|
|
648
|
+
z_max = kwargs.get('z_max',None)
|
|
649
|
+
|
|
650
|
+
xi_min = kwargs.get('xi_min',None)
|
|
651
|
+
yi_min = kwargs.get('yi_min',None)
|
|
652
|
+
zi_min = kwargs.get('zi_min',None)
|
|
653
|
+
|
|
654
|
+
xi_max = kwargs.get('xi_max',None)
|
|
655
|
+
yi_max = kwargs.get('yi_max',None)
|
|
656
|
+
zi_max = kwargs.get('zi_max',None)
|
|
657
|
+
|
|
658
|
+
## set default attributes
|
|
659
|
+
self.attrs['fsubtype'] = 'unsteady'
|
|
660
|
+
self.attrs['fclass'] = 'rgd'
|
|
661
|
+
|
|
662
|
+
if verbose: print('\n'+'rgd.init_from_eas4()'+'\n'+72*'-')
|
|
663
|
+
|
|
664
|
+
if not (os.path.isfile(fn_eas4) or (os.path.islink(fn_eas4) and os.path.isfile(os.path.realpath(fn_eas4)))):
|
|
665
|
+
raise FileNotFoundError(f'{fn_eas4} is not a file or a symlink to an existing file')
|
|
666
|
+
|
|
667
|
+
with eas4(fn_eas4, 'r', verbose=False, driver=self.driver, comm=self.comm) as hf_eas4:
|
|
668
|
+
|
|
669
|
+
if verbose: even_print('infile', os.path.basename(fn_eas4))
|
|
670
|
+
if verbose: even_print('infile size', '%0.2f [GB]'%(os.path.getsize(fn_eas4)/1024**3))
|
|
671
|
+
|
|
672
|
+
if verbose: even_print( 'gmode dim1' , '%i / %s'%( hf_eas4.gmode_dim1, gmode_dict[hf_eas4.gmode_dim1] ) )
|
|
673
|
+
if verbose: even_print( 'gmode dim2' , '%i / %s'%( hf_eas4.gmode_dim2, gmode_dict[hf_eas4.gmode_dim2] ) )
|
|
674
|
+
if verbose: even_print( 'gmode dim3' , '%i / %s'%( hf_eas4.gmode_dim3, gmode_dict[hf_eas4.gmode_dim3] ) )
|
|
675
|
+
|
|
676
|
+
## check gmode (RGD should not have more than ALL_G/4 on any dim)
|
|
677
|
+
if (hf_eas4.gmode_dim1 > 4):
|
|
678
|
+
raise ValueError('turbx.rgd cannot handle gmode > 4 (EAS4 gmode_dim1=%i)'%hf_eas4.gmode_dim1)
|
|
679
|
+
if (hf_eas4.gmode_dim2 > 4):
|
|
680
|
+
raise ValueError('turbx.rgd cannot handle gmode > 4 (EAS4 gmode_dim2=%i)'%hf_eas4.gmode_dim2)
|
|
681
|
+
if (hf_eas4.gmode_dim3 > 4):
|
|
682
|
+
raise ValueError('turbx.rgd cannot handle gmode > 4 (EAS4 gmode_dim3=%i)'%hf_eas4.gmode_dim3)
|
|
683
|
+
|
|
684
|
+
if verbose: even_print( 'nx' , f'{hf_eas4.nx:d}' )
|
|
685
|
+
if verbose: even_print( 'ny' , f'{hf_eas4.ny:d}' )
|
|
686
|
+
if verbose: even_print( 'nz' , f'{hf_eas4.nz:d}' )
|
|
687
|
+
if verbose: print(72*'-')
|
|
688
|
+
if verbose: even_print('outfile', self.fname)
|
|
689
|
+
|
|
690
|
+
# === copy over freestream parameters
|
|
691
|
+
|
|
692
|
+
header_attr_keys = [
|
|
693
|
+
'Ma','Re','Pr',
|
|
694
|
+
'kappa','R',
|
|
695
|
+
'p_inf','T_inf',
|
|
696
|
+
'S_Suth','mu_Suth_ref','T_Suth_ref',
|
|
697
|
+
]
|
|
698
|
+
|
|
699
|
+
## assert that top-level attributes don't already exist
|
|
700
|
+
#if any([ key in self.attrs for key in header_attr_keys ]):
|
|
701
|
+
# raise ValueError('some udef keys are already present in target file.')
|
|
702
|
+
|
|
703
|
+
## udef dict from EAS4
|
|
704
|
+
udef = hf_eas4.udef
|
|
705
|
+
|
|
706
|
+
## strip dict into 2x arrays (keys,values) and save to HDF5
|
|
707
|
+
udef_real = list(udef.values())
|
|
708
|
+
udef_char = list(udef.keys())
|
|
709
|
+
udef_real_h5 = np.array(udef_real, dtype=np.float64)
|
|
710
|
+
udef_char_h5 = np.array([s.encode('ascii', 'ignore') for s in udef_char], dtype='S128')
|
|
711
|
+
|
|
712
|
+
if ('header/udef_real' in self):
|
|
713
|
+
del self['header/udef_real']
|
|
714
|
+
if ('header/udef_char' in self):
|
|
715
|
+
del self['header/udef_char']
|
|
716
|
+
|
|
717
|
+
self.create_dataset('header/udef_real', data=udef_real_h5, dtype=np.float64)
|
|
718
|
+
self.create_dataset('header/udef_char', data=udef_char_h5, dtype='S128')
|
|
719
|
+
|
|
720
|
+
## assert that all primary udef keys are available in EAS4
|
|
721
|
+
## --> this could be fed into 'freestream_parameters()' instead
|
|
722
|
+
for key in header_attr_keys:
|
|
723
|
+
if key not in udef.keys():
|
|
724
|
+
raise ValueError(f"key '{key}' not found in udef of {fn_eas4}")
|
|
725
|
+
|
|
726
|
+
## write (primary) udef members as top-level attributes of HDF5 file
|
|
727
|
+
for key in header_attr_keys:
|
|
728
|
+
self.attrs[key] = udef[key]
|
|
729
|
+
|
|
730
|
+
## standard freestream parameters
|
|
731
|
+
Ma = udef['Ma']
|
|
732
|
+
Re = udef['Re']
|
|
733
|
+
Pr = udef['Pr']
|
|
734
|
+
kappa = udef['kappa']
|
|
735
|
+
R = udef['R']
|
|
736
|
+
p_inf = udef['p_inf']
|
|
737
|
+
T_inf = udef['T_inf']
|
|
738
|
+
S_Suth = udef['S_Suth']
|
|
739
|
+
mu_Suth_ref = udef['mu_Suth_ref']
|
|
740
|
+
T_Suth_ref = udef['T_Suth_ref']
|
|
741
|
+
|
|
742
|
+
## compute derived freestream parameters
|
|
743
|
+
C_Suth = mu_Suth_ref/(T_Suth_ref**(3/2))*(T_Suth_ref + S_Suth) ## [kg/(m·s·√K)]
|
|
744
|
+
mu_inf = mu_Suth_ref*(T_inf/T_Suth_ref)**(3/2) * ((T_Suth_ref+S_Suth)/(T_inf+S_Suth))
|
|
745
|
+
rho_inf = p_inf/(R*T_inf)
|
|
746
|
+
nu_inf = mu_inf/rho_inf
|
|
747
|
+
a_inf = np.sqrt(kappa*R*T_inf)
|
|
748
|
+
U_inf = Ma*a_inf
|
|
749
|
+
cp = R*kappa/(kappa-1.)
|
|
750
|
+
cv = cp/kappa
|
|
751
|
+
recov_fac = Pr**(1/3)
|
|
752
|
+
Taw = T_inf + recov_fac*U_inf**2/(2*cp)
|
|
753
|
+
lchar = Re*nu_inf/U_inf
|
|
754
|
+
tchar = lchar / U_inf
|
|
755
|
+
|
|
756
|
+
## convenience
|
|
757
|
+
uchar = U_inf
|
|
758
|
+
M_inf = Ma
|
|
759
|
+
|
|
760
|
+
## write (derived) freestream parameters as top-level attributes of HDF5 file
|
|
761
|
+
self.attrs['C_Suth'] = C_Suth
|
|
762
|
+
self.attrs['mu_inf'] = mu_inf
|
|
763
|
+
self.attrs['rho_inf'] = rho_inf
|
|
764
|
+
self.attrs['nu_inf'] = nu_inf
|
|
765
|
+
self.attrs['a_inf'] = a_inf
|
|
766
|
+
self.attrs['U_inf'] = U_inf
|
|
767
|
+
self.attrs['cp'] = cp
|
|
768
|
+
self.attrs['cv'] = cv
|
|
769
|
+
self.attrs['recov_fac'] = recov_fac
|
|
770
|
+
self.attrs['Taw'] = Taw
|
|
771
|
+
self.attrs['lchar'] = lchar
|
|
772
|
+
self.attrs['tchar'] = tchar
|
|
773
|
+
self.attrs['uchar'] = uchar
|
|
774
|
+
self.attrs['M_inf'] = M_inf
|
|
775
|
+
|
|
776
|
+
# === copy over dims info
|
|
777
|
+
|
|
778
|
+
if all([('dims/x' in self),('dims/y' in self),('dims/z' in self)]):
|
|
779
|
+
pass
|
|
780
|
+
## future: 2D/3D handling here
|
|
781
|
+
else:
|
|
782
|
+
|
|
783
|
+
x = np.copy( hf_eas4.x )
|
|
784
|
+
y = np.copy( hf_eas4.y )
|
|
785
|
+
z = np.copy( hf_eas4.z )
|
|
786
|
+
|
|
787
|
+
nx = x.size
|
|
788
|
+
ny = y.size
|
|
789
|
+
nz = z.size
|
|
790
|
+
ngp = nx*ny*nz
|
|
791
|
+
|
|
792
|
+
if any([
|
|
793
|
+
(xi_min is not None),
|
|
794
|
+
(xi_max is not None),
|
|
795
|
+
(yi_min is not None),
|
|
796
|
+
(yi_max is not None),
|
|
797
|
+
(zi_min is not None),
|
|
798
|
+
(zi_max is not None),
|
|
799
|
+
(x_min is not None),
|
|
800
|
+
(x_max is not None),
|
|
801
|
+
(y_min is not None),
|
|
802
|
+
(y_max is not None),
|
|
803
|
+
(z_min is not None),
|
|
804
|
+
(z_max is not None),
|
|
805
|
+
(sx!=1),
|
|
806
|
+
(sy!=1),
|
|
807
|
+
(sz!=1),
|
|
808
|
+
]):
|
|
809
|
+
hasFilters=True
|
|
810
|
+
else:
|
|
811
|
+
hasFilters=False
|
|
812
|
+
|
|
813
|
+
if hasFilters and verbose:
|
|
814
|
+
print(72*'-')
|
|
815
|
+
msg = 'Filtered Dim Info'
|
|
816
|
+
print(msg+'\n'+len(msg)*'-')
|
|
817
|
+
|
|
818
|
+
## READ boolean arrays for each axis
|
|
819
|
+
xfiR = np.full(nx,False,dtype=bool)
|
|
820
|
+
yfiR = np.full(ny,False,dtype=bool)
|
|
821
|
+
zfiR = np.full(nz,False,dtype=bool)
|
|
822
|
+
|
|
823
|
+
## index arrays along each axis --> these get overwritten depending on filter choices
|
|
824
|
+
xfi = np.arange(nx,dtype=np.int64)
|
|
825
|
+
yfi = np.arange(ny,dtype=np.int64)
|
|
826
|
+
zfi = np.arange(nz,dtype=np.int64)
|
|
827
|
+
|
|
828
|
+
## total bounds clip (physical nondimensional distance)
|
|
829
|
+
if (x_min is not None):
|
|
830
|
+
xfi = np.array([i for i in xfi if (x[i] >= x_min)])
|
|
831
|
+
if verbose: even_print('x_min', '%0.3f'%x_min)
|
|
832
|
+
if (x_max is not None):
|
|
833
|
+
xfi = np.array([i for i in xfi if (x[i] <= x_max)])
|
|
834
|
+
if verbose: even_print('x_max', '%0.3f'%x_max)
|
|
835
|
+
if (y_min is not None):
|
|
836
|
+
yfi = np.array([i for i in yfi if (y[i] >= y_min)])
|
|
837
|
+
if verbose: even_print('y_min', '%0.3f'%y_min)
|
|
838
|
+
if (y_max is not None):
|
|
839
|
+
yfi = np.array([i for i in yfi if (y[i] <= y_max)])
|
|
840
|
+
if verbose: even_print('y_max', '%0.3f'%y_max)
|
|
841
|
+
if (z_min is not None):
|
|
842
|
+
zfi = np.array([i for i in zfi if (z[i] >= z_min)])
|
|
843
|
+
if verbose: even_print('z_min', '%0.3f'%z_min)
|
|
844
|
+
if (z_max is not None):
|
|
845
|
+
zfi = np.array([i for i in zfi if (z[i] <= z_max)])
|
|
846
|
+
if verbose: even_print('z_max', '%0.3f'%z_max)
|
|
847
|
+
|
|
848
|
+
# === total bounds clip (coordinate index)
|
|
849
|
+
|
|
850
|
+
if (xi_min is not None):
|
|
851
|
+
|
|
852
|
+
xfi_ = []
|
|
853
|
+
if verbose: even_print('xi_min', '%i'%xi_min)
|
|
854
|
+
for c in xfi:
|
|
855
|
+
if (xi_min<0) and (c>=(nx+xi_min)): ## support negative indexing
|
|
856
|
+
xfi_.append(c)
|
|
857
|
+
elif (xi_min>=0) and (c>=xi_min):
|
|
858
|
+
xfi_.append(c)
|
|
859
|
+
xfi=np.array(xfi_, dtype=np.int64)
|
|
860
|
+
|
|
861
|
+
if (xi_max is not None):
|
|
862
|
+
|
|
863
|
+
xfi_ = []
|
|
864
|
+
if verbose: even_print('xi_max', '%i'%xi_max)
|
|
865
|
+
for c in xfi:
|
|
866
|
+
if (xi_max<0) and (c<=(nx+xi_max)): ## support negative indexing
|
|
867
|
+
xfi_.append(c)
|
|
868
|
+
elif (xi_max>=0) and (c<=xi_max):
|
|
869
|
+
xfi_.append(c)
|
|
870
|
+
xfi=np.array(xfi_, dtype=np.int64)
|
|
871
|
+
|
|
872
|
+
if (yi_min is not None):
|
|
873
|
+
|
|
874
|
+
yfi_ = []
|
|
875
|
+
if verbose: even_print('yi_min', '%i'%yi_min)
|
|
876
|
+
for c in yfi:
|
|
877
|
+
if (yi_min<0) and (c>=(ny+yi_min)): ## support negative indexing
|
|
878
|
+
yfi_.append(c)
|
|
879
|
+
elif (yi_min>=0) and (c>=yi_min):
|
|
880
|
+
yfi_.append(c)
|
|
881
|
+
yfi=np.array(yfi_, dtype=np.int64)
|
|
882
|
+
|
|
883
|
+
if (yi_max is not None):
|
|
884
|
+
|
|
885
|
+
yfi_ = []
|
|
886
|
+
if verbose: even_print('yi_max', '%i'%yi_max)
|
|
887
|
+
for c in yfi:
|
|
888
|
+
if (yi_max<0) and (c<=(ny+yi_max)): ## support negative indexing
|
|
889
|
+
yfi_.append(c)
|
|
890
|
+
elif (yi_max>=0) and (c<=yi_max):
|
|
891
|
+
yfi_.append(c)
|
|
892
|
+
yfi=np.array(yfi_, dtype=np.int64)
|
|
893
|
+
|
|
894
|
+
if (zi_min is not None):
|
|
895
|
+
|
|
896
|
+
zfi_ = []
|
|
897
|
+
if verbose: even_print('zi_min', '%i'%zi_min)
|
|
898
|
+
for c in zfi:
|
|
899
|
+
if (zi_min<0) and (c>=(nz+zi_min)): ## support negative indexing
|
|
900
|
+
zfi_.append(c)
|
|
901
|
+
elif (zi_min>=0) and (c>=zi_min):
|
|
902
|
+
zfi_.append(c)
|
|
903
|
+
zfi=np.array(zfi_, dtype=np.int64)
|
|
904
|
+
|
|
905
|
+
if (zi_max is not None):
|
|
906
|
+
|
|
907
|
+
zfi_ = []
|
|
908
|
+
if verbose: even_print('zi_max', '%i'%zi_max)
|
|
909
|
+
for c in zfi:
|
|
910
|
+
if (zi_max<0) and (c<=(nz+zi_max)): ## support negative indexing
|
|
911
|
+
zfi_.append(c)
|
|
912
|
+
elif (zi_max>=0) and (c<=zi_max):
|
|
913
|
+
zfi_.append(c)
|
|
914
|
+
zfi=np.array(zfi_, dtype=np.int64)
|
|
915
|
+
|
|
916
|
+
## resolution filter (skip every n grid points in each direction)
|
|
917
|
+
if (sx!=1):
|
|
918
|
+
if verbose: even_print('sx', '%i'%sx)
|
|
919
|
+
xfi = xfi[::sx]
|
|
920
|
+
if (sy!=1):
|
|
921
|
+
if verbose: even_print('sy', '%i'%sy)
|
|
922
|
+
yfi = yfi[::sy]
|
|
923
|
+
if (sz!=1):
|
|
924
|
+
if verbose: even_print('sz', '%i'%sz)
|
|
925
|
+
zfi = zfi[::sz]
|
|
926
|
+
|
|
927
|
+
if hasFilters:
|
|
928
|
+
|
|
929
|
+
if (xfi.size==0):
|
|
930
|
+
raise ValueError('x grid filter is empty... check!')
|
|
931
|
+
if (yfi.size==0):
|
|
932
|
+
raise ValueError('y grid filter is empty... check!')
|
|
933
|
+
if (zfi.size==0):
|
|
934
|
+
raise ValueError('z grid filter is empty... check!')
|
|
935
|
+
|
|
936
|
+
## set 'True' for indices to be read
|
|
937
|
+
xfiR[xfi] = True
|
|
938
|
+
yfiR[yfi] = True
|
|
939
|
+
zfiR[zfi] = True
|
|
940
|
+
|
|
941
|
+
## write 1D grid filters to HDF5
|
|
942
|
+
self.create_dataset('dims/xfi' , data=xfi )
|
|
943
|
+
self.create_dataset('dims/yfi' , data=yfi )
|
|
944
|
+
self.create_dataset('dims/zfi' , data=zfi )
|
|
945
|
+
self.create_dataset('dims/xfiR' , data=xfiR )
|
|
946
|
+
self.create_dataset('dims/yfiR' , data=yfiR )
|
|
947
|
+
self.create_dataset('dims/zfiR' , data=zfiR )
|
|
948
|
+
|
|
949
|
+
## overwrite 1D grid vectors
|
|
950
|
+
x = np.copy(x[xfi])
|
|
951
|
+
y = np.copy(y[yfi])
|
|
952
|
+
z = np.copy(z[zfi])
|
|
953
|
+
|
|
954
|
+
nx = x.shape[0]
|
|
955
|
+
ny = y.shape[0]
|
|
956
|
+
nz = z.shape[0]
|
|
957
|
+
ngp = nx*ny*nz
|
|
958
|
+
|
|
959
|
+
if verbose: even_print('nx' , f'{nx:d}' )
|
|
960
|
+
if verbose: even_print('ny' , f'{ny:d}' )
|
|
961
|
+
if verbose: even_print('nz' , f'{nz:d}' )
|
|
962
|
+
if verbose: even_print('ngp' , f'{ngp:d}' )
|
|
963
|
+
|
|
964
|
+
self.nx = nx
|
|
965
|
+
self.ny = ny
|
|
966
|
+
self.nz = nz
|
|
967
|
+
self.ngp = ngp
|
|
968
|
+
|
|
969
|
+
## write 1D [x,y,z] coord arrays
|
|
970
|
+
if ('dims/x' in self):
|
|
971
|
+
del self['dims/x']
|
|
972
|
+
self.create_dataset('dims/x', data=x)
|
|
973
|
+
if ('dims/y' in self):
|
|
974
|
+
del self['dims/y']
|
|
975
|
+
self.create_dataset('dims/y', data=y)
|
|
976
|
+
if ('dims/z' in self):
|
|
977
|
+
del self['dims/z']
|
|
978
|
+
self.create_dataset('dims/z', data=z)
|
|
979
|
+
|
|
980
|
+
if verbose: print(72*'-')
|
|
981
|
+
self.get_header(verbose=True)
|
|
982
|
+
if verbose: print(72*'-')
|
|
983
|
+
return
|
|
984
|
+
|
|
985
|
+
    def init_from_rgd(self, fn_rgd, **kwargs):
        '''
        initialize an RGD from an RGD (copy over header data & coordinate data)
        '''
        ## Parameters
        ## ----------
        ## fn_rgd : str or path-like
        ##     path to the reference RGD (HDF5) file whose header, udef and
        ##     coordinate data are copied into this file
        ## t_info : bool, optional (default True)
        ##     if True, copy the reference file's time vector [dims/t];
        ##     if False, write a single-entry placeholder time vector [0.]
        ##
        ## Raises
        ## ------
        ## ValueError
        ##     if 'header/udef_real' AND 'header/udef_char' already exist in
        ##     this file (udef must not be written twice)
        
        t_info = kwargs.get('t_info',True)
        #chunk_kb = kwargs.get('chunk_kb',4*1024) ## 4 [MB]
        
        #verbose = kwargs.get('verbose',True)
        #if (self.rank!=0):
        #    verbose=False
        
        ## set default attributes: fsubtype, fclass
        ## 'fsubtype' may be overwritten below if the reference file carries one
        self.attrs['fsubtype'] = 'unsteady'
        self.attrs['fclass'] = 'rgd'
        
        ## open the reference RGD with the same HDF5 driver / MPI communicator
        ## as this file so collective access stays consistent
        with rgd(fn_rgd, 'r', driver=self.driver, comm=self.comm) as hf_ref:
            
            ## copy over fsubtype
            if hasattr(hf_ref,'fsubtype'):
                self.attrs['fsubtype'] = hf_ref.fsubtype
            
            # === copy over header info if needed
            
            ## copy top-level attributes
            for key in hf_ref.attrs:
                self.attrs[key] = hf_ref.attrs[key]
            
            ## udef is stored as two parallel datasets: values (float64) and
            ## keys (fixed-width S128 byte strings) --> refuse to overwrite
            if all([('header/udef_real' in self),('header/udef_char' in self)]):
                raise ValueError('udef already present')
            else:
                udef = hf_ref.udef
                udef_real = list(udef.values())
                udef_char = list(udef.keys())
                udef_real_h5 = np.array(udef_real, dtype=np.float64)
                ## encode keys to ASCII bytes for the fixed-width 'S128' dtype
                udef_char_h5 = np.array([s.encode('ascii', 'ignore') for s in udef_char], dtype='S128')
                
                self.create_dataset('header/udef_real', data=udef_real_h5, maxshape=np.shape(udef_real_h5), dtype=np.float64)
                self.create_dataset('header/udef_char', data=udef_char_h5, maxshape=np.shape(udef_char_h5), dtype='S128')
                self.udef = udef
                self.udef_real = udef_real
                self.udef_char = udef_char
            
            # === copy over spatial dim info
            
            x = np.copy( hf_ref.x )
            y = np.copy( hf_ref.y )
            z = np.copy( hf_ref.z )
            
            self.nx = x.size
            self.ny = y.size
            self.nz = z.size
            self.ngp = self.nx*self.ny*self.nz ## total number of grid points
            ## delete any pre-existing coordinate datasets before re-creating
            ## (h5py cannot overwrite a dataset in place with new shape/data)
            if ('dims/x' in self):
                del self['dims/x']
            if ('dims/y' in self):
                del self['dims/y']
            if ('dims/z' in self):
                del self['dims/z']
            
            self.create_dataset('dims/x', data=x)
            self.create_dataset('dims/y', data=y)
            self.create_dataset('dims/z', data=z)
            
            # === copy over temporal dim info
            
            if t_info:
                self.t = hf_ref.t
                self.nt = self.t.size
                self.create_dataset('dims/t', data=hf_ref.t)
            else:
                ## placeholder single-entry time vector
                t = np.array([0.], dtype=np.float64)
                if ('dims/t' in self):
                    del self['dims/t']
                self.create_dataset('dims/t', data=t)
            
            # ===
            
            ## copy over [data_dim/<>] dsets if present
            ## chunks=None --> contiguous storage for these (dimension) dsets
            if ('data_dim' in hf_ref):
                for dsn in hf_ref['data_dim'].keys():
                    data = np.copy( hf_ref[f'data_dim/{dsn}'][()] )
                    self.create_dataset(f'data_dim/{dsn}', data=data, chunks=None)
        
        ## NOTE(review): source formatting is ambiguous here -- this barrier is
        ## placed after the 'with' block (all ranks sync once copying is done);
        ## confirm against the original file's indentation
        if self.usingmpi: self.comm.Barrier()
        
        ## refresh this file's cached header/attribute state
        self.get_header(verbose=False)
        return
|
|
1072
|
+
|
|
1073
|
+
def import_eas4(self, fn_eas4_list, **kwargs):
|
|
1074
|
+
'''
|
|
1075
|
+
import data from a series of EAS4 files to a RGD
|
|
1076
|
+
'''
|
|
1077
|
+
|
|
1078
|
+
if (self.rank!=0):
|
|
1079
|
+
verbose=False
|
|
1080
|
+
else:
|
|
1081
|
+
verbose=True
|
|
1082
|
+
|
|
1083
|
+
#EAS4_NO_G=1; EAS4_X0DX_G=2; EAS4_UDEF_G=3; EAS4_ALL_G=4; EAS4_FULL_G=5
|
|
1084
|
+
gmode_dict = {1:'EAS4_NO_G', 2:'EAS4_X0DX_G', 3:'EAS4_UDEF_G', 4:'EAS4_ALL_G', 5:'EAS4_FULL_G'}
|
|
1085
|
+
|
|
1086
|
+
if verbose: print('\n'+'rgd.import_eas4()'+'\n'+72*'-')
|
|
1087
|
+
t_start_func = timeit.default_timer()
|
|
1088
|
+
|
|
1089
|
+
ntbuf = kwargs.get('ntbuf',1) ## [t] R/W buffer size
|
|
1090
|
+
if not isinstance(ntbuf, int):
|
|
1091
|
+
raise ValueError('ntbuf must be type int')
|
|
1092
|
+
if (ntbuf<1):
|
|
1093
|
+
raise ValueError('ntbuf<1')
|
|
1094
|
+
|
|
1095
|
+
if self.open_mode=='r':
|
|
1096
|
+
raise ValueError('cant do import or file initialization if file has been opened in read-only mode.')
|
|
1097
|
+
|
|
1098
|
+
report_reads = kwargs.get('report_reads',False)
|
|
1099
|
+
report_writes = kwargs.get('report_writes',True)
|
|
1100
|
+
|
|
1101
|
+
ti_min = kwargs.get('ti_min',None)
|
|
1102
|
+
ti_max = kwargs.get('ti_max',None)
|
|
1103
|
+
tt_min = kwargs.get('tt_min',None)
|
|
1104
|
+
tt_max = kwargs.get('tt_max',None)
|
|
1105
|
+
|
|
1106
|
+
## dont actually copy over data, just initialize datasets with 0's
|
|
1107
|
+
init_dsets_only = kwargs.get('init_dsets_only',False)
|
|
1108
|
+
|
|
1109
|
+
## delete EAS4s after import --> DANGER!
|
|
1110
|
+
delete_after_import = kwargs.get('delete_after_import',False)
|
|
1111
|
+
|
|
1112
|
+
## if you're just initializing, don't allow delete
|
|
1113
|
+
if (init_dsets_only and delete_after_import):
|
|
1114
|
+
raise ValueError("if init_dsets_only=True, then delete_after_import should not be activated!")
|
|
1115
|
+
|
|
1116
|
+
## delete only allowed if no time ranges are selected
|
|
1117
|
+
if delete_after_import and any([(ti_min is not None),(ti_max is not None),(tt_min is not None),(tt_max is not None)]):
|
|
1118
|
+
raise ValueError("if delete_after_import=True, then ti_min,ti_max,tt_min,tt_max are not supported")
|
|
1119
|
+
|
|
1120
|
+
chunk_kb = kwargs.get('chunk_kb',2*1024) ## h5 chunk size: default 2 [MB]
|
|
1121
|
+
#chunk_kb = kwargs.get('chunk_kb',64*1024) ## h5 chunk size: default 64 [MB]
|
|
1122
|
+
#chunk_base = kwargs.get('chunk_base',2)
|
|
1123
|
+
|
|
1124
|
+
## used later when determining whether to re-initialize datasets
|
|
1125
|
+
#chunk_constraint = kwargs.get('chunk_constraint',(1,None,None,None))
|
|
1126
|
+
chunk_constraint = kwargs.get('chunk_constraint',None)
|
|
1127
|
+
if chunk_constraint is None:
|
|
1128
|
+
chunk_constraint_was_provided = False
|
|
1129
|
+
else:
|
|
1130
|
+
chunk_constraint_was_provided = True
|
|
1131
|
+
|
|
1132
|
+
## float precision when copying
|
|
1133
|
+
## default is 'single' i.e. cast data to single
|
|
1134
|
+
## 'same' will preserve the floating point precision from the EAS4 file
|
|
1135
|
+
prec = kwargs.get('prec',None)
|
|
1136
|
+
if (prec is None):
|
|
1137
|
+
prec = 'single'
|
|
1138
|
+
elif (prec=='single'):
|
|
1139
|
+
pass
|
|
1140
|
+
elif (prec=='same'):
|
|
1141
|
+
pass
|
|
1142
|
+
else:
|
|
1143
|
+
raise ValueError('prec not set correctly')
|
|
1144
|
+
|
|
1145
|
+
## check for an often made mistake
|
|
1146
|
+
## 'ts_min' / 'ts_max' should NOT be allowed as inputs
|
|
1147
|
+
ts_min = kwargs.get('ts_min',None)
|
|
1148
|
+
ts_max = kwargs.get('ts_max',None)
|
|
1149
|
+
if (ts_min is not None):
|
|
1150
|
+
raise ValueError('ts_min is not an option --> did you mean ti_min or tt_min?')
|
|
1151
|
+
if (ts_max is not None):
|
|
1152
|
+
raise ValueError('ts_max is not an option --> did you mean ti_max or tt_max?')
|
|
1153
|
+
del ts_min
|
|
1154
|
+
del ts_max
|
|
1155
|
+
|
|
1156
|
+
## check that the passed iterable of EAS4 files is OK
|
|
1157
|
+
if not hasattr(fn_eas4_list, '__iter__'):
|
|
1158
|
+
raise ValueError('first arg \'fn_eas4_list\' must be iterable')
|
|
1159
|
+
for fn_eas4 in fn_eas4_list:
|
|
1160
|
+
if not os.path.isfile(fn_eas4):
|
|
1161
|
+
raise FileNotFoundError('%s not found!'%fn_eas4)
|
|
1162
|
+
|
|
1163
|
+
## ranks per direction
|
|
1164
|
+
rx = kwargs.get('rx',1)
|
|
1165
|
+
ry = kwargs.get('ry',1)
|
|
1166
|
+
rz = kwargs.get('rz',1)
|
|
1167
|
+
rt = kwargs.get('rt',1)
|
|
1168
|
+
|
|
1169
|
+
## check validity of rank declaration
|
|
1170
|
+
if not all(isinstance(rr,int) and rr>0 for rr in (rx,ry,rz,rt)):
|
|
1171
|
+
raise ValueError('rx,ry,rz,rt must be positive integers')
|
|
1172
|
+
if (rx*ry*rz*rt != self.n_ranks):
|
|
1173
|
+
raise ValueError('rx*ry*rz*rt != self.n_ranks')
|
|
1174
|
+
if (rx>self.nx):
|
|
1175
|
+
raise ValueError('rx>self.nx')
|
|
1176
|
+
if (ry>self.ny):
|
|
1177
|
+
raise ValueError('ry>self.ny')
|
|
1178
|
+
if (rz>self.nz):
|
|
1179
|
+
raise ValueError('rz>self.nz')
|
|
1180
|
+
if not self.usingmpi:
|
|
1181
|
+
if rx>1:
|
|
1182
|
+
if verbose: print(f'WARNING: file not opened in MPI mode but rx={rx:d}... setting rx=1')
|
|
1183
|
+
rx = 1
|
|
1184
|
+
if ry>1:
|
|
1185
|
+
if verbose: print(f'WARNING: file not opened in MPI mode but ry={ry:d}... setting ry=1')
|
|
1186
|
+
ry = 1
|
|
1187
|
+
if rz>1:
|
|
1188
|
+
if verbose: print(f'WARNING: file not opened in MPI mode but rz={rz:d}... setting rz=1')
|
|
1189
|
+
rz = 1
|
|
1190
|
+
|
|
1191
|
+
## st = timestep skip
|
|
1192
|
+
## spatial [x,y,z] skips done in init_from_XXX()
|
|
1193
|
+
st = kwargs.get('st',1)
|
|
1194
|
+
|
|
1195
|
+
if not isinstance(st, int):
|
|
1196
|
+
raise ValueError('time skip parameter st should be type int')
|
|
1197
|
+
if (st<1):
|
|
1198
|
+
raise ValueError('st<1')
|
|
1199
|
+
|
|
1200
|
+
## update this RGD's header and attributes
|
|
1201
|
+
self.get_header(verbose=False)
|
|
1202
|
+
|
|
1203
|
+
if self.usingmpi:
|
|
1204
|
+
comm_eas4 = MPI.COMM_WORLD ## communicator for opening EAS4s
|
|
1205
|
+
else:
|
|
1206
|
+
comm_eas4 = None
|
|
1207
|
+
|
|
1208
|
+
## get all time info & check
|
|
1209
|
+
if (self.rank==0):
|
|
1210
|
+
t = np.array([], dtype=np.float64)
|
|
1211
|
+
for fn_eas4 in fn_eas4_list:
|
|
1212
|
+
with eas4(fn_eas4, 'r', verbose=False) as hf_eas4:
|
|
1213
|
+
t_ = np.copy(hf_eas4.t)
|
|
1214
|
+
t = np.concatenate((t,t_))
|
|
1215
|
+
else:
|
|
1216
|
+
t = np.array([],dtype=np.float64) ## 't' must exist on all ranks prior to bcast
|
|
1217
|
+
|
|
1218
|
+
if self.usingmpi:
|
|
1219
|
+
self.comm.Barrier()
|
|
1220
|
+
|
|
1221
|
+
## broadcast concatenated time vector to all ranks
|
|
1222
|
+
if self.usingmpi:
|
|
1223
|
+
t = self.comm.bcast(t, root=0)
|
|
1224
|
+
|
|
1225
|
+
if verbose: even_print('n EAS4 files','%i'%len(fn_eas4_list))
|
|
1226
|
+
if verbose: even_print('nt all files','%i'%t.size)
|
|
1227
|
+
if verbose: even_print('delete after import',str(delete_after_import))
|
|
1228
|
+
if verbose: print(72*'-')
|
|
1229
|
+
|
|
1230
|
+
if (t.size>1):
|
|
1231
|
+
|
|
1232
|
+
## check no zero distance elements
|
|
1233
|
+
if np.any(np.diff(t) == 0):
|
|
1234
|
+
raise ValueError('t arr has zero-distance elements')
|
|
1235
|
+
else:
|
|
1236
|
+
if verbose: even_print('check: Δt!=0','passed')
|
|
1237
|
+
|
|
1238
|
+
## check monotonically increasing
|
|
1239
|
+
if not np.all(np.diff(t) > 0.):
|
|
1240
|
+
raise ValueError('t arr not monotonically increasing')
|
|
1241
|
+
else:
|
|
1242
|
+
if verbose: even_print('check: t mono increasing','passed')
|
|
1243
|
+
|
|
1244
|
+
## check constant Δt
|
|
1245
|
+
dt0 = np.diff(t)[0]
|
|
1246
|
+
if not np.all(np.isclose(np.diff(t), dt0, rtol=1e-3)):
|
|
1247
|
+
if (self.rank==0): print(np.diff(t))
|
|
1248
|
+
raise ValueError('t arr not uniformly spaced')
|
|
1249
|
+
else:
|
|
1250
|
+
if verbose: even_print('check: constant Δt','passed')
|
|
1251
|
+
|
|
1252
|
+
# === get all grid info & check
|
|
1253
|
+
|
|
1254
|
+
if len(fn_eas4_list)>1:
|
|
1255
|
+
if self.rank==0:
|
|
1256
|
+
eas4_x_arr = []
|
|
1257
|
+
eas4_y_arr = []
|
|
1258
|
+
eas4_z_arr = []
|
|
1259
|
+
for fn_eas4 in fn_eas4_list:
|
|
1260
|
+
with eas4(fn_eas4, 'r', verbose=False) as hf_eas4:
|
|
1261
|
+
eas4_x_arr.append( hf_eas4.x )
|
|
1262
|
+
eas4_y_arr.append( hf_eas4.y )
|
|
1263
|
+
eas4_z_arr.append( hf_eas4.z )
|
|
1264
|
+
|
|
1265
|
+
## check coordinate vectors are same
|
|
1266
|
+
if not np.all([np.allclose(eas4_z_arr[i],eas4_z_arr[0],rtol=1e-8,atol=1e-8) for i in range(len(fn_eas4_list))]):
|
|
1267
|
+
raise ValueError('EAS4 files do not have the same [z] coordinates')
|
|
1268
|
+
if self.usingmpi: self.comm.Abort(1)
|
|
1269
|
+
else:
|
|
1270
|
+
if verbose: even_print('check: [z] coordinate vectors equal','passed')
|
|
1271
|
+
|
|
1272
|
+
if not np.all([np.allclose(eas4_y_arr[i],eas4_y_arr[0],rtol=1e-8,atol=1e-8) for i in range(len(fn_eas4_list))]):
|
|
1273
|
+
raise ValueError('EAS4 files do not have the same [y] coordinates')
|
|
1274
|
+
if self.usingmpi: self.comm.Abort(1)
|
|
1275
|
+
else:
|
|
1276
|
+
if verbose: even_print('check: [y] coordinate vectors equal','passed')
|
|
1277
|
+
|
|
1278
|
+
if not np.all([np.allclose(eas4_x_arr[i],eas4_x_arr[0],rtol=1e-8,atol=1e-8) for i in range(len(fn_eas4_list))]):
|
|
1279
|
+
raise ValueError('EAS4 files do not have the same [x] coordinates')
|
|
1280
|
+
if self.usingmpi: self.comm.Abort(1)
|
|
1281
|
+
else:
|
|
1282
|
+
if verbose: even_print('check: [x] coordinate vectors equal','passed')
|
|
1283
|
+
|
|
1284
|
+
if verbose: print(72*'-')
|
|
1285
|
+
|
|
1286
|
+
if self.usingmpi:
|
|
1287
|
+
self.comm.Barrier()
|
|
1288
|
+
|
|
1289
|
+
## [t] resolution filter (skip every N timesteps)
|
|
1290
|
+
tfi = np.arange(t.size, dtype=np.int64)
|
|
1291
|
+
if (st!=1):
|
|
1292
|
+
if verbose:
|
|
1293
|
+
even_print('st', f'{st:d}')
|
|
1294
|
+
print(72*'-')
|
|
1295
|
+
tfi = np.copy( tfi[::st] )
|
|
1296
|
+
|
|
1297
|
+
## initialize 'doRead' vector --> boolean vector to be updated
|
|
1298
|
+
doRead = np.full((t.size,), True, dtype=bool)
|
|
1299
|
+
|
|
1300
|
+
## skip filter
|
|
1301
|
+
if (st!=1):
|
|
1302
|
+
doRead[np.isin(np.arange(t.size),tfi,invert=True)] = False
|
|
1303
|
+
|
|
1304
|
+
## min/max index filter
|
|
1305
|
+
if (ti_min is not None):
|
|
1306
|
+
if not isinstance(ti_min, int):
|
|
1307
|
+
raise TypeError('ti_min must be type int')
|
|
1308
|
+
doRead[:ti_min] = False
|
|
1309
|
+
if (ti_max is not None):
|
|
1310
|
+
if not isinstance(ti_max, int):
|
|
1311
|
+
raise TypeError('ti_max must be type int')
|
|
1312
|
+
doRead[ti_max:] = False
|
|
1313
|
+
if (tt_min is not None):
|
|
1314
|
+
if (tt_min>=0.):
|
|
1315
|
+
doRead[np.where((t-t.min())<tt_min)] = False
|
|
1316
|
+
elif (tt_min<0.):
|
|
1317
|
+
doRead[np.where((t-t.max())<tt_min)] = False
|
|
1318
|
+
if (tt_max is not None):
|
|
1319
|
+
if (tt_max>=0.):
|
|
1320
|
+
doRead[np.where((t-t.min())>tt_max)] = False
|
|
1321
|
+
elif (tt_max<0.):
|
|
1322
|
+
doRead[np.where((t-t.max())>tt_max)] = False
|
|
1323
|
+
|
|
1324
|
+
## RGD time attributes
|
|
1325
|
+
self.t = np.copy(t[doRead]) ## filter times by True/False from boolean vector doRead
|
|
1326
|
+
self.nt = self.t.shape[0]
|
|
1327
|
+
self.ti = np.arange(self.nt, dtype=np.int64)
|
|
1328
|
+
|
|
1329
|
+
# ## update [t]
|
|
1330
|
+
# if ('dims/t' in self):
|
|
1331
|
+
# t_ = np.copy(self['dims/t'][()])
|
|
1332
|
+
# if not np.allclose(t_, self.t, rtol=1e-8, atol=1e-8):
|
|
1333
|
+
# if verbose:
|
|
1334
|
+
# print('>>> [t] in file not match [t] that has been determined ... overwriting')
|
|
1335
|
+
# del self['dims/t']
|
|
1336
|
+
# self.create_dataset('dims/t', data=self.t)
|
|
1337
|
+
# else:
|
|
1338
|
+
# self.create_dataset('dims/t', data=self.t)
|
|
1339
|
+
|
|
1340
|
+
## update [t]
|
|
1341
|
+
if ('dims/t' in self):
|
|
1342
|
+
del self['dims/t']
|
|
1343
|
+
self.create_dataset('dims/t', data=self.t)
|
|
1344
|
+
|
|
1345
|
+
## divide spatial OUTPUT grid by ranks
|
|
1346
|
+
## if no grid filter present, then INPUT = OUTPUT
|
|
1347
|
+
if self.usingmpi:
|
|
1348
|
+
comm4d = self.comm.Create_cart(dims=[rx,ry,rz,rt], periods=[False,False,False,False], reorder=False)
|
|
1349
|
+
t4d = comm4d.Get_coords(self.rank)
|
|
1350
|
+
|
|
1351
|
+
rxl_ = np.array_split(np.arange(self.nx,dtype=np.int64),min(rx,self.nx))
|
|
1352
|
+
ryl_ = np.array_split(np.arange(self.ny,dtype=np.int64),min(ry,self.ny))
|
|
1353
|
+
rzl_ = np.array_split(np.arange(self.nz,dtype=np.int64),min(rz,self.nz))
|
|
1354
|
+
#rtl_ = np.array_split(np.arange(self.nt,dtype=np.int64),min(rt,self.nt))
|
|
1355
|
+
|
|
1356
|
+
rxl = [[b[0],b[-1]+1] for b in rxl_ ]
|
|
1357
|
+
ryl = [[b[0],b[-1]+1] for b in ryl_ ]
|
|
1358
|
+
rzl = [[b[0],b[-1]+1] for b in rzl_ ]
|
|
1359
|
+
#rtl = [[b[0],b[-1]+1] for b in rtl_ ]
|
|
1360
|
+
|
|
1361
|
+
rx1, rx2 = rxl[t4d[0]]
|
|
1362
|
+
ry1, ry2 = ryl[t4d[1]]
|
|
1363
|
+
rz1, rz2 = rzl[t4d[2]]
|
|
1364
|
+
#rt1, rt2 = rtl[t4d[3]]
|
|
1365
|
+
|
|
1366
|
+
nxr = rx2 - rx1
|
|
1367
|
+
nyr = ry2 - ry1
|
|
1368
|
+
nzr = rz2 - rz1
|
|
1369
|
+
#ntr = rt2 - rt1
|
|
1370
|
+
|
|
1371
|
+
else:
|
|
1372
|
+
nxr = self.nx
|
|
1373
|
+
nyr = self.ny
|
|
1374
|
+
nzr = self.nz
|
|
1375
|
+
#ntr = self.nt
|
|
1376
|
+
|
|
1377
|
+
## divide spatial READ/INPUT grid by ranks --> if grid filters present
|
|
1378
|
+
if self.hasGridFilter:
|
|
1379
|
+
if self.usingmpi:
|
|
1380
|
+
|
|
1381
|
+
## this rank's indices to read from FULL file, indices in global context
|
|
1382
|
+
xfi_ = np.copy( self.xfi[rx1:rx2] )
|
|
1383
|
+
yfi_ = np.copy( self.yfi[ry1:ry2] )
|
|
1384
|
+
zfi_ = np.copy( self.zfi[rz1:rz2] )
|
|
1385
|
+
|
|
1386
|
+
## this rank's global read RANGE from FULL file
|
|
1387
|
+
rx1R,rx2R = xfi_.min() , xfi_.max()+1
|
|
1388
|
+
ry1R,ry2R = yfi_.min() , yfi_.max()+1
|
|
1389
|
+
rz1R,rz2R = zfi_.min() , zfi_.max()+1
|
|
1390
|
+
|
|
1391
|
+
## this rank's LOCAL index filter to cut down read data
|
|
1392
|
+
xfi_local = np.copy( xfi_ - rx1R )
|
|
1393
|
+
yfi_local = np.copy( yfi_ - ry1R )
|
|
1394
|
+
zfi_local = np.copy( zfi_ - rz1R )
|
|
1395
|
+
|
|
1396
|
+
## determine RGD scalars (get from EAS4 scalars)
|
|
1397
|
+
if not hasattr(self,'scalars') or (len(self.scalars)==0):
|
|
1398
|
+
|
|
1399
|
+
with eas4(fn_eas4_list[0], 'r', verbose=False, driver=self.driver, comm=comm_eas4) as hf_eas4:
|
|
1400
|
+
self.scalars = hf_eas4.scalars
|
|
1401
|
+
self.n_scalars = len(self.scalars)
|
|
1402
|
+
|
|
1403
|
+
## decide dtypes
|
|
1404
|
+
for scalar in hf_eas4.scalars:
|
|
1405
|
+
|
|
1406
|
+
ti = 0
|
|
1407
|
+
dsn = f'Data/{hf_eas4.domainName}/ts_{ti:06d}/par_{hf_eas4.scalar_n_map[scalar]:06d}'
|
|
1408
|
+
dset = hf_eas4[dsn]
|
|
1409
|
+
dtype = dset.dtype
|
|
1410
|
+
|
|
1411
|
+
if (prec=='same'):
|
|
1412
|
+
self.scalars_dtypes_dict[scalar] = dtype
|
|
1413
|
+
elif (prec=='single'):
|
|
1414
|
+
if (dtype!=np.float32) and (dtype!=np.float64): ## make sure its either a single or double float
|
|
1415
|
+
raise ValueError
|
|
1416
|
+
self.scalars_dtypes_dict[scalar] = np.dtype(np.float32)
|
|
1417
|
+
else:
|
|
1418
|
+
raise ValueError
|
|
1419
|
+
|
|
1420
|
+
if self.usingmpi:
|
|
1421
|
+
comm_eas4.Barrier()
|
|
1422
|
+
|
|
1423
|
+
# ==============================================================
|
|
1424
|
+
# initialize datasets
|
|
1425
|
+
# ==============================================================
|
|
1426
|
+
|
|
1427
|
+
if verbose:
|
|
1428
|
+
progress_bar = tqdm(
|
|
1429
|
+
total=len(self.scalars),
|
|
1430
|
+
ncols=100,
|
|
1431
|
+
desc='initialize dsets',
|
|
1432
|
+
leave=True,
|
|
1433
|
+
file=sys.stdout,
|
|
1434
|
+
mininterval=0.1,
|
|
1435
|
+
smoothing=0.,
|
|
1436
|
+
#bar_format="\033[B{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}\033[A\n\b",
|
|
1437
|
+
bar_format="{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}",
|
|
1438
|
+
ascii="░█",
|
|
1439
|
+
colour='#FF6600',
|
|
1440
|
+
)
|
|
1441
|
+
|
|
1442
|
+
for scalar in self.scalars:
|
|
1443
|
+
|
|
1444
|
+
dtype = self.scalars_dtypes_dict[scalar]
|
|
1445
|
+
data_gb = dtype.itemsize * self.nt*self.nz*self.ny*self.nx / 1024**3
|
|
1446
|
+
shape = (self.nt,self.nz,self.ny,self.nx)
|
|
1447
|
+
|
|
1448
|
+
## the user provided a chunk_constraint, so calculate it
|
|
1449
|
+
if chunk_constraint_was_provided:
|
|
1450
|
+
chunks = h5_chunk_sizer(nxi=shape, constraint=chunk_constraint, size_kb=chunk_kb, itemsize=dtype.itemsize)
|
|
1451
|
+
else:
|
|
1452
|
+
if self.usingmpi:
|
|
1453
|
+
chunks = h5_chunk_sizer(nxi=shape, constraint=(1,('max',self.nz//rz),('max',self.ny//ry),('max',self.nx//rx)), size_kb=chunk_kb, itemsize=dtype.itemsize)
|
|
1454
|
+
else:
|
|
1455
|
+
chunks = h5_chunk_sizer(nxi=shape, constraint=(1,None,None,None), size_kb=chunk_kb, itemsize=dtype.itemsize)
|
|
1456
|
+
|
|
1457
|
+
do_dset_initialize = True ## default value which, if all conditions are met, will be turned False
|
|
1458
|
+
|
|
1459
|
+
dsn = f'data/{scalar}'
|
|
1460
|
+
|
|
1461
|
+
## check if dataset already exists and matches conditions
|
|
1462
|
+
## ... if conditions are met then skip re-initializing dataset
|
|
1463
|
+
if self.open_mode in ('a','r+') and (dsn in self):
|
|
1464
|
+
dset = self[dsn]
|
|
1465
|
+
if verbose:
|
|
1466
|
+
tqdm.write(even_print(f'dset {dsn} already exists', str(True), s=True))
|
|
1467
|
+
|
|
1468
|
+
shape_matches = dset.shape == shape
|
|
1469
|
+
dtype_matches = dset.dtype == dtype
|
|
1470
|
+
|
|
1471
|
+
## either 1) no constraint was given or 2) constraint was given AND it matches
|
|
1472
|
+
chunks_match = not chunk_constraint_was_provided or dset.chunks == chunks
|
|
1473
|
+
|
|
1474
|
+
## if no constraint was given, copy back existing chunk
|
|
1475
|
+
if not chunk_constraint_was_provided:
|
|
1476
|
+
chunks = dset.chunks
|
|
1477
|
+
|
|
1478
|
+
if verbose:
|
|
1479
|
+
tqdm.write(even_print(f'dset {dsn} shape matches', str(shape_matches), s=True))
|
|
1480
|
+
tqdm.write(even_print(f'dset {dsn} dtype matches', str(dtype_matches), s=True))
|
|
1481
|
+
if chunk_constraint_was_provided:
|
|
1482
|
+
tqdm.write(even_print(f'dset {dsn} chunks match', str(chunks_match), s=True))
|
|
1483
|
+
|
|
1484
|
+
if shape_matches and dtype_matches and chunks_match:
|
|
1485
|
+
do_dset_initialize = False
|
|
1486
|
+
|
|
1487
|
+
if do_dset_initialize:
|
|
1488
|
+
|
|
1489
|
+
if (f'data/{scalar}' in self):
|
|
1490
|
+
del self[f'data/{scalar}']
|
|
1491
|
+
|
|
1492
|
+
if self.usingmpi: self.comm.Barrier()
|
|
1493
|
+
t_start = timeit.default_timer()
|
|
1494
|
+
|
|
1495
|
+
if verbose:
|
|
1496
|
+
tqdm.write(even_print(f'initializing data/{scalar}', f'{data_gb:0.2f} [GB]', s=True))
|
|
1497
|
+
|
|
1498
|
+
## !!!!!!!!!!! this has a tendency to hang !!!!!!!!!!!
|
|
1499
|
+
## --> increasing Lustre stripe size tends to fix this (kwarg 'stripe_size_mb' upon 'w' open)
|
|
1500
|
+
dset = self.create_dataset(
|
|
1501
|
+
dsn,
|
|
1502
|
+
shape=shape,
|
|
1503
|
+
dtype=dtype,
|
|
1504
|
+
chunks=chunks,
|
|
1505
|
+
)
|
|
1506
|
+
|
|
1507
|
+
## write dummy data to dataset to ensure that it is truly initialized
|
|
1508
|
+
if not self.usingmpi:
|
|
1509
|
+
h5_ds_force_allocate_chunks(dset,verbose=verbose)
|
|
1510
|
+
|
|
1511
|
+
if self.usingmpi: self.comm.Barrier()
|
|
1512
|
+
t_delta = timeit.default_timer() - t_start
|
|
1513
|
+
if verbose:
|
|
1514
|
+
tqdm.write(even_print(f'initialize data/{scalar}', f'{data_gb:0.2f} [GB] {t_delta:0.2f} [s] {(data_gb/t_delta):0.3f} [GB/s]', s=True))
|
|
1515
|
+
|
|
1516
|
+
chunk_kb_ = np.prod(dset.chunks) * dset.dtype.itemsize / 1024. ## actual
|
|
1517
|
+
if verbose:
|
|
1518
|
+
tqdm.write(even_print('chunk shape (t,z,y,x)', str(dset.chunks), s=True))
|
|
1519
|
+
tqdm.write(even_print('chunk size', f'{int(round(chunk_kb_)):d} [KB]', s=True))
|
|
1520
|
+
|
|
1521
|
+
if verbose:
|
|
1522
|
+
progress_bar.update()
|
|
1523
|
+
|
|
1524
|
+
if self.usingmpi:
|
|
1525
|
+
self.comm.Barrier()
|
|
1526
|
+
if verbose:
|
|
1527
|
+
progress_bar.close()
|
|
1528
|
+
print(72*'-')
|
|
1529
|
+
|
|
1530
|
+
## report size of RGD after initialization
|
|
1531
|
+
if verbose: tqdm.write(even_print(os.path.basename(self.fname), f'{os.path.getsize(self.fname)/1024**3:0.2f} [GB]', s=True))
|
|
1532
|
+
if verbose: print(72*'-')
|
|
1533
|
+
|
|
1534
|
+
# ==============================================================
|
|
1535
|
+
# open & read EAS4s, read data into RAM, write to RGD
|
|
1536
|
+
# ==============================================================
|
|
1537
|
+
|
|
1538
|
+
if not init_dsets_only:
|
|
1539
|
+
|
|
1540
|
+
## should we tell the EAS4 to open with MPIIO hint 'romio_no_indep_rw' ?
|
|
1541
|
+
if self.usingmpi:
|
|
1542
|
+
eas4_no_indep_rw = True
|
|
1543
|
+
else:
|
|
1544
|
+
eas4_no_indep_rw = False
|
|
1545
|
+
|
|
1546
|
+
## get main dtype and confirm all scalar dtypes are same (limitation)
|
|
1547
|
+
dtype = self.scalars_dtypes_dict[self.scalars[0]]
|
|
1548
|
+
for scalar in self.scalars:
|
|
1549
|
+
if not ( np.dtype(self.scalars_dtypes_dict[scalar]) == np.dtype(dtype) ):
|
|
1550
|
+
raise NotImplementedError('dtype of scalars in output HDF5 file are not same. update!')
|
|
1551
|
+
|
|
1552
|
+
## current limitation of read buffer due to uncreative implementation
|
|
1553
|
+
if (self.nt%ntbuf!=0):
|
|
1554
|
+
raise ValueError(f'n timesteps to be read ({self.nt}) is not divisible by ntbuf ({ntbuf:d})')
|
|
1555
|
+
|
|
1556
|
+
## initialize read/write buffer
|
|
1557
|
+
databuf = np.zeros(shape=(ntbuf,nzr,nyr,nxr), dtype={'names':self.scalars, 'formats':[ dtype for s in self.scalars ]})
|
|
1558
|
+
buffer_nts_loaded = 0 ## counter for number of timesteps loaded in buffer
|
|
1559
|
+
buffers_written = -1 ## counter for number of buffers that have been written
|
|
1560
|
+
|
|
1561
|
+
#print(f'rank {self.rank:d} databuf shape : {str(databuf["u"].shape)}')
|
|
1562
|
+
|
|
1563
|
+
if self.usingmpi:
|
|
1564
|
+
self.comm.Barrier()
|
|
1565
|
+
|
|
1566
|
+
## report read/write buffer size
|
|
1567
|
+
if verbose:
|
|
1568
|
+
even_print( 'R/W buffer size (global)' , f'{ntbuf*np.prod(shape[1:])*len(self.scalars)*dtype.itemsize/1024**3:0.2f} [GB]' )
|
|
1569
|
+
print(72*'-')
|
|
1570
|
+
|
|
1571
|
+
if verbose:
|
|
1572
|
+
progress_bar = tqdm(
|
|
1573
|
+
#total=self.nt*self.n_scalars,
|
|
1574
|
+
total=self.nt//ntbuf, ## N buffer writes
|
|
1575
|
+
ncols=100,
|
|
1576
|
+
desc='import',
|
|
1577
|
+
leave=True,
|
|
1578
|
+
file=sys.stdout,
|
|
1579
|
+
mininterval=0.1,
|
|
1580
|
+
smoothing=0.,
|
|
1581
|
+
#bar_format="\033[B{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}\033[A\n\b",
|
|
1582
|
+
bar_format="{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}",
|
|
1583
|
+
ascii="░█",
|
|
1584
|
+
colour='#FF6600',
|
|
1585
|
+
)
|
|
1586
|
+
|
|
1587
|
+
## counters / timers
|
|
1588
|
+
data_gb_read = 0.
|
|
1589
|
+
data_gb_write = 0.
|
|
1590
|
+
t_read = 0.
|
|
1591
|
+
t_write = 0.
|
|
1592
|
+
|
|
1593
|
+
tii = -1 ## counter full series
|
|
1594
|
+
tiii = -1 ## counter RGD-local
|
|
1595
|
+
for fn_eas4 in fn_eas4_list: ## this has to stay the outer-most loop for file deletion purposes
|
|
1596
|
+
with eas4(fn_eas4, 'r', verbose=False, driver=self.driver, comm=comm_eas4, no_indep_rw=eas4_no_indep_rw) as hf_eas4:
|
|
1597
|
+
|
|
1598
|
+
if verbose: tqdm.write(even_print(os.path.basename(fn_eas4), '%0.2f [GB]'%(os.path.getsize(fn_eas4)/1024**3), s=True))
|
|
1599
|
+
|
|
1600
|
+
# if verbose: tqdm.write(even_print('gmode_dim1' , '%i'%hf_eas4.gmode_dim1 , s=True))
|
|
1601
|
+
# if verbose: tqdm.write(even_print('gmode_dim2' , '%i'%hf_eas4.gmode_dim2 , s=True))
|
|
1602
|
+
# if verbose: tqdm.write(even_print('gmode_dim3' , '%i'%hf_eas4.gmode_dim3 , s=True))
|
|
1603
|
+
|
|
1604
|
+
if verbose: tqdm.write(even_print( 'gmode dim1' , '%i / %s'%( hf_eas4.gmode_dim1, gmode_dict[hf_eas4.gmode_dim1] ), s=True ))
|
|
1605
|
+
if verbose: tqdm.write(even_print( 'gmode dim2' , '%i / %s'%( hf_eas4.gmode_dim2, gmode_dict[hf_eas4.gmode_dim2] ), s=True ))
|
|
1606
|
+
if verbose: tqdm.write(even_print( 'gmode dim3' , '%i / %s'%( hf_eas4.gmode_dim3, gmode_dict[hf_eas4.gmode_dim3] ), s=True ))
|
|
1607
|
+
|
|
1608
|
+
if verbose: tqdm.write(even_print('duration' , '%0.2f'%hf_eas4.duration , s=True))
|
|
1609
|
+
|
|
1610
|
+
# ===
|
|
1611
|
+
|
|
1612
|
+
for ti in range(hf_eas4.nt): ## this EAS4's time indices
|
|
1613
|
+
tii += 1 ## full EAS4 series counter
|
|
1614
|
+
if doRead[tii]:
|
|
1615
|
+
tiii += 1 ## output RGD counter (takes into account skip, min/max)
|
|
1616
|
+
|
|
1617
|
+
buffer_nts_loaded += 1
|
|
1618
|
+
|
|
1619
|
+
#if verbose: tqdm.write(f'writing to buffer at index {tiii%ntbuf:d}') ## debug
|
|
1620
|
+
|
|
1621
|
+
## perform collective read, write to RAM buffer
|
|
1622
|
+
for scalar in hf_eas4.scalars:
|
|
1623
|
+
if (scalar in self.scalars):
|
|
1624
|
+
|
|
1625
|
+
## dset handle in EAS4
|
|
1626
|
+
dset = hf_eas4[f'Data/{hf_eas4.domainName}/ts_{ti:06d}/par_{hf_eas4.scalar_n_map[scalar]:06d}']
|
|
1627
|
+
|
|
1628
|
+
if hf_eas4.dform==1:
|
|
1629
|
+
ds_nx,ds_ny,ds_nz = dset.shape
|
|
1630
|
+
elif hf_eas4.dform==2:
|
|
1631
|
+
ds_nz,ds_ny,ds_nx = dset.shape
|
|
1632
|
+
else:
|
|
1633
|
+
raise RuntimeError
|
|
1634
|
+
|
|
1635
|
+
## EAS4 has a 'collapsed' dimension but >1 ranks in that dim
|
|
1636
|
+
if ( ds_nx < rx ):
|
|
1637
|
+
raise ValueError(f'dset shape in [x] is <rx : dset.shape={str(dset.shape)} , rx={rx:d}')
|
|
1638
|
+
if ( ds_ny < ry ):
|
|
1639
|
+
raise ValueError(f'dset shape in [y] is <ry : dset.shape={str(dset.shape)} , ry={ry:d}')
|
|
1640
|
+
if ( ds_nz < rz ):
|
|
1641
|
+
raise ValueError(f'dset shape in [z] is <rz : dset.shape={str(dset.shape)} , rz={rz:d}')
|
|
1642
|
+
|
|
1643
|
+
if hf_eas4.usingmpi: comm_eas4.Barrier()
|
|
1644
|
+
t_start = timeit.default_timer()
|
|
1645
|
+
|
|
1646
|
+
if hf_eas4.usingmpi:
|
|
1647
|
+
if self.hasGridFilter:
|
|
1648
|
+
with dset.collective:
|
|
1649
|
+
if hf_eas4.dform==1:
|
|
1650
|
+
d_ = dset[rx1R:rx2R,ry1R:ry2R,rz1R:rz2R]
|
|
1651
|
+
elif hf_eas4.dform==2:
|
|
1652
|
+
d_ = dset[rz1R:rz2R,ry1R:ry2R,rx1R:rx2R]
|
|
1653
|
+
else:
|
|
1654
|
+
raise RuntimeError
|
|
1655
|
+
|
|
1656
|
+
if ( ds_nx == 1 ):
|
|
1657
|
+
xfi_local = [0,]
|
|
1658
|
+
if ( ds_ny == 1 ):
|
|
1659
|
+
yfi_local = [0,]
|
|
1660
|
+
if ( ds_nz == 1 ):
|
|
1661
|
+
zfi_local = [0,]
|
|
1662
|
+
|
|
1663
|
+
databuf[scalar][tiii%ntbuf,:,:,:] = d_[ np.ix_(xfi_local,yfi_local,zfi_local) ].T
|
|
1664
|
+
|
|
1665
|
+
else:
|
|
1666
|
+
with dset.collective:
|
|
1667
|
+
|
|
1668
|
+
if hf_eas4.dform==1:
|
|
1669
|
+
databuf[scalar][tiii%ntbuf,:,:,:] = dset[rx1:rx2,ry1:ry2,rz1:rz2].T
|
|
1670
|
+
elif hf_eas4.dform==2:
|
|
1671
|
+
databuf[scalar][tiii%ntbuf,:,:,:] = dset[rz1:rz2,ry1:ry2,rx1:rx2]
|
|
1672
|
+
else:
|
|
1673
|
+
raise RuntimeError
|
|
1674
|
+
|
|
1675
|
+
else:
|
|
1676
|
+
if self.hasGridFilter:
|
|
1677
|
+
d_ = dset[()]
|
|
1678
|
+
databuf[scalar][tiii%ntbuf,:,:,:] = d_[ np.ix_(self.xfi,self.yfi,self.zfi) ].T
|
|
1679
|
+
else:
|
|
1680
|
+
if hf_eas4.dform==1:
|
|
1681
|
+
databuf[scalar][tiii%ntbuf,:,:,:] = dset[()].T
|
|
1682
|
+
elif hf_eas4.dform==2:
|
|
1683
|
+
databuf[scalar][tiii%ntbuf,:,:,:] = dset[()]
|
|
1684
|
+
else:
|
|
1685
|
+
raise RuntimeError
|
|
1686
|
+
|
|
1687
|
+
if hf_eas4.usingmpi: comm_eas4.Barrier()
|
|
1688
|
+
t_delta = timeit.default_timer() - t_start
|
|
1689
|
+
|
|
1690
|
+
data_gb = dset.dtype.itemsize * np.prod(dset.shape) / 1024**3
|
|
1691
|
+
t_read += t_delta
|
|
1692
|
+
data_gb_read += data_gb
|
|
1693
|
+
|
|
1694
|
+
if report_reads and verbose:
|
|
1695
|
+
txt = even_print(f'read: {scalar}', f'{data_gb:0.3f} [GB] {t_delta:0.3f} [s] {data_gb/t_delta:0.3f} [GB/s]', s=True)
|
|
1696
|
+
tqdm.write(txt)
|
|
1697
|
+
|
|
1698
|
+
## collective write
|
|
1699
|
+
if (buffer_nts_loaded%ntbuf==0): ## if buffer is full... initiate write
|
|
1700
|
+
buffer_nts_loaded = 0 ## reset
|
|
1701
|
+
buffers_written += 1 ## increment
|
|
1702
|
+
|
|
1703
|
+
## the time index range in RGD to write contents of R/W buffer to
|
|
1704
|
+
ti1 = ntbuf*buffers_written
|
|
1705
|
+
ti2 = ti1+ntbuf
|
|
1706
|
+
#if verbose: tqdm.write(f'performing write: {ti1:d}:{ti2:d}') ## debug
|
|
1707
|
+
|
|
1708
|
+
for scalar in self.scalars:
|
|
1709
|
+
|
|
1710
|
+
dset = self[f'data/{scalar}'] ## dset in RGD
|
|
1711
|
+
|
|
1712
|
+
if self.usingmpi: self.comm.Barrier()
|
|
1713
|
+
t_start = timeit.default_timer()
|
|
1714
|
+
if self.usingmpi:
|
|
1715
|
+
with dset.collective:
|
|
1716
|
+
dset[ti1:ti2,rz1:rz2,ry1:ry2,rx1:rx2] = databuf[scalar][:,:,:,:]
|
|
1717
|
+
else:
|
|
1718
|
+
dset[ti1:ti2,:,:,:] = databuf[scalar][:,:,:,:]
|
|
1719
|
+
|
|
1720
|
+
if self.usingmpi: self.comm.Barrier()
|
|
1721
|
+
t_delta = timeit.default_timer() - t_start
|
|
1722
|
+
|
|
1723
|
+
t_write += t_delta
|
|
1724
|
+
data_gb = ntbuf * databuf[scalar].dtype.itemsize * np.prod(dset.shape[1:]) / 1024**3
|
|
1725
|
+
data_gb_write += data_gb
|
|
1726
|
+
|
|
1727
|
+
if report_writes and verbose:
|
|
1728
|
+
txt = even_print(f'write: {scalar}', f'{data_gb:0.3f} [GB] {t_delta:0.3f} [s] {data_gb/t_delta:0.3f} [GB/s]', s=True)
|
|
1729
|
+
tqdm.write(txt)
|
|
1730
|
+
|
|
1731
|
+
## write zeros to buffer (optional)
|
|
1732
|
+
databuf[scalar][:,:,:,:] = 0.
|
|
1733
|
+
|
|
1734
|
+
if verbose: progress_bar.update() ## progress bar counts buffer dumps
|
|
1735
|
+
|
|
1736
|
+
## (optionally) delete source EAS4 file
|
|
1737
|
+
## 'do_delete.txt' must be present to actually initiate deletion
|
|
1738
|
+
if delete_after_import:
|
|
1739
|
+
if (self.rank==0):
|
|
1740
|
+
if os.path.isfile('do_delete.txt'):
|
|
1741
|
+
tqdm.write(even_print('deleting', fn_eas4, s=True))
|
|
1742
|
+
os.remove(fn_eas4)
|
|
1743
|
+
self.comm.Barrier()
|
|
1744
|
+
|
|
1745
|
+
if verbose: progress_bar.close()
|
|
1746
|
+
|
|
1747
|
+
if hf_eas4.usingmpi: comm_eas4.Barrier()
|
|
1748
|
+
if self.usingmpi: self.comm.Barrier()
|
|
1749
|
+
|
|
1750
|
+
self.get_header(verbose=False)
|
|
1751
|
+
|
|
1752
|
+
# ## get read read/write stopwatch totals all ranks
|
|
1753
|
+
# if not init_dsets_only:
|
|
1754
|
+
# if self.usingmpi:
|
|
1755
|
+
# G = self.comm.gather([data_gb_read, data_gb_write, self.rank], root=0)
|
|
1756
|
+
# G = self.comm.bcast(G, root=0)
|
|
1757
|
+
# data_gb_read = sum([x[0] for x in G])
|
|
1758
|
+
# data_gb_write = sum([x[1] for x in G])
|
|
1759
|
+
|
|
1760
|
+
if init_dsets_only:
|
|
1761
|
+
if verbose: print('>>> init_dsets_only=True, so no EAS4 data was imported')
|
|
1762
|
+
|
|
1763
|
+
if verbose: print(72*'-')
|
|
1764
|
+
if verbose: even_print('nt', '%i'%self.nt )
|
|
1765
|
+
if verbose: even_print('dt', '%0.8f'%self.dt )
|
|
1766
|
+
if verbose: even_print('duration', '%0.2f'%self.duration )
|
|
1767
|
+
|
|
1768
|
+
if not init_dsets_only:
|
|
1769
|
+
if verbose: print(72*'-')
|
|
1770
|
+
if verbose: even_print('time read',format_time_string(t_read))
|
|
1771
|
+
if verbose: even_print('time write',format_time_string(t_write))
|
|
1772
|
+
if verbose: even_print('read total avg', f'{data_gb_read:0.2f} [GB] {t_read:0.2f} [s] {(data_gb_read/t_read):0.3f} [GB/s]')
|
|
1773
|
+
if verbose: even_print('write total avg', f'{data_gb_write:0.2f} [GB] {t_write:0.2f} [s] {(data_gb_write/t_write):0.3f} [GB/s]')
|
|
1774
|
+
|
|
1775
|
+
## report file
|
|
1776
|
+
if self.usingmpi:
|
|
1777
|
+
self.comm.Barrier()
|
|
1778
|
+
if verbose:
|
|
1779
|
+
print(72*'-')
|
|
1780
|
+
even_print( os.path.basename(self.fname), f'{(os.path.getsize(self.fname)/1024**3):0.2f} [GB]')
|
|
1781
|
+
if verbose: print(72*'-')
|
|
1782
|
+
if verbose: print('total time : rgd.import_eas4() : %s'%format_time_string((timeit.default_timer() - t_start_func)))
|
|
1783
|
+
if verbose: print(72*'-')
|
|
1784
|
+
return
|
|
1785
|
+
|
|
1786
|
+
@staticmethod
|
|
1787
|
+
def copy(fn_rgd_src, fn_rgd_tgt, **kwargs):
|
|
1788
|
+
'''
|
|
1789
|
+
copy header info, selected scalars, and [x,y,z,t] range to new RGD file
|
|
1790
|
+
--> this currently does NOT work in serial mode
|
|
1791
|
+
'''
|
|
1792
|
+
|
|
1793
|
+
try:
|
|
1794
|
+
comm = MPI.COMM_WORLD
|
|
1795
|
+
rank = MPI.COMM_WORLD.Get_rank()
|
|
1796
|
+
n_ranks = MPI.COMM_WORLD.Get_size()
|
|
1797
|
+
#except Exception as e:
|
|
1798
|
+
except Exception:
|
|
1799
|
+
print('rgd.copy() currently only works in MPI mode.')
|
|
1800
|
+
raise ## re-raise same exception, preserve traceback
|
|
1801
|
+
|
|
1802
|
+
if (rank==0):
|
|
1803
|
+
verbose = True
|
|
1804
|
+
else:
|
|
1805
|
+
verbose = False
|
|
1806
|
+
|
|
1807
|
+
if verbose: print('\n'+'rgd.copy()'+'\n'+72*'-')
|
|
1808
|
+
t_start_func = timeit.default_timer()
|
|
1809
|
+
|
|
1810
|
+
if not h5py.h5.get_config().mpi:
|
|
1811
|
+
raise NotImplementedError('h5py must be parallel-enabled')
|
|
1812
|
+
|
|
1813
|
+
rx = kwargs.get('rx',1)
|
|
1814
|
+
ry = kwargs.get('ry',1)
|
|
1815
|
+
rz = kwargs.get('rz',1)
|
|
1816
|
+
rt = kwargs.get('rt',1)
|
|
1817
|
+
force = kwargs.get('force',False) ## overwrite or raise error if exists
|
|
1818
|
+
|
|
1819
|
+
ti_min = kwargs.get('ti_min',None)
|
|
1820
|
+
ti_max = kwargs.get('ti_max',None)
|
|
1821
|
+
scalars = kwargs.get('scalars',None)
|
|
1822
|
+
|
|
1823
|
+
chunk_kb = kwargs.get('chunk_kb',4*1024) ## h5 chunk size: default 4 [MB]
|
|
1824
|
+
chunk_constraint = kwargs.get('chunk_constraint',(1,None,None,None)) ## the 'constraint' parameter for sizing h5 chunks
|
|
1825
|
+
chunk_base = kwargs.get('chunk_base',2)
|
|
1826
|
+
|
|
1827
|
+
stripe_count = kwargs.pop('stripe_count' , 16 ) ## for initializing RGD file
|
|
1828
|
+
stripe_size_mb = kwargs.pop('stripe_size_mb' , 2 )
|
|
1829
|
+
|
|
1830
|
+
xi_min = kwargs.get('xi_min',None) ## 4D coordinate
|
|
1831
|
+
xi_max = kwargs.get('xi_max',None)
|
|
1832
|
+
yi_min = kwargs.get('yi_min',None)
|
|
1833
|
+
yi_max = kwargs.get('yi_max',None)
|
|
1834
|
+
zi_min = kwargs.get('zi_min',None)
|
|
1835
|
+
zi_max = kwargs.get('zi_max',None)
|
|
1836
|
+
ti_min = kwargs.get('ti_min',None)
|
|
1837
|
+
ti_max = kwargs.get('ti_max',None)
|
|
1838
|
+
|
|
1839
|
+
ct = kwargs.get('ct',1) ## 'chunks' in time
|
|
1840
|
+
|
|
1841
|
+
xi_step = kwargs.get('xi_step',1)
|
|
1842
|
+
yi_step = kwargs.get('yi_step',1)
|
|
1843
|
+
zi_step = kwargs.get('zi_step',1)
|
|
1844
|
+
|
|
1845
|
+
prec_coords = kwargs.get('prec_coords',None)
|
|
1846
|
+
if (prec_coords is None):
|
|
1847
|
+
prec_coords = 'same'
|
|
1848
|
+
elif (prec_coords=='single'):
|
|
1849
|
+
pass
|
|
1850
|
+
elif (prec_coords=='same'):
|
|
1851
|
+
pass
|
|
1852
|
+
else:
|
|
1853
|
+
raise ValueError('prec_coords not set correctly')
|
|
1854
|
+
|
|
1855
|
+
if (rt!=1):
|
|
1856
|
+
raise AssertionError('rt!=1')
|
|
1857
|
+
if (rx*ry*rz!=n_ranks):
|
|
1858
|
+
raise AssertionError('rx*ry*rz!=n_ranks')
|
|
1859
|
+
if not os.path.isfile(fn_rgd_src):
|
|
1860
|
+
raise FileNotFoundError(f'{fn_rgd_src} not found!')
|
|
1861
|
+
if os.path.isfile(fn_rgd_tgt) and not force:
|
|
1862
|
+
raise FileExistsError(f'{fn_rgd_tgt} already exists. delete it or use \'force=True\' kwarg')
|
|
1863
|
+
|
|
1864
|
+
# ===
|
|
1865
|
+
|
|
1866
|
+
with rgd(fn_rgd_src, 'r', comm=comm, driver='mpio') as hf_src:
|
|
1867
|
+
with rgd(fn_rgd_tgt, 'w', comm=comm, driver='mpio', force=force, stripe_count=stripe_count, stripe_size_mb=stripe_size_mb) as hf_tgt:
|
|
1868
|
+
|
|
1869
|
+
## copy over header info (source --> target)
|
|
1870
|
+
hf_tgt.init_from_rgd(fn_rgd_src)
|
|
1871
|
+
|
|
1872
|
+
if (scalars is None):
|
|
1873
|
+
scalars = hf_src.scalars
|
|
1874
|
+
|
|
1875
|
+
if verbose:
|
|
1876
|
+
even_print('fn_rgd_src' , fn_rgd_src )
|
|
1877
|
+
even_print('nx' , '%i'%hf_src.nx )
|
|
1878
|
+
even_print('ny' , '%i'%hf_src.ny )
|
|
1879
|
+
even_print('nz' , '%i'%hf_src.nz )
|
|
1880
|
+
even_print('nt' , '%i'%hf_src.nt )
|
|
1881
|
+
if verbose: print(72*'-')
|
|
1882
|
+
|
|
1883
|
+
if (rx>hf_src.nx):
|
|
1884
|
+
raise AssertionError('rx>nx')
|
|
1885
|
+
if (ry>hf_src.ny):
|
|
1886
|
+
raise AssertionError('ry>ny')
|
|
1887
|
+
if (rz>hf_src.nz):
|
|
1888
|
+
raise AssertionError('rz>nz')
|
|
1889
|
+
if (rt>hf_src.nt):
|
|
1890
|
+
raise AssertionError('rt>nt')
|
|
1891
|
+
|
|
1892
|
+
## for RGD, just load full grid on every rank
|
|
1893
|
+
x = np.copy( hf_src.x )
|
|
1894
|
+
y = np.copy( hf_src.y )
|
|
1895
|
+
z = np.copy( hf_src.z )
|
|
1896
|
+
t = np.copy( hf_src.t )
|
|
1897
|
+
|
|
1898
|
+
xi = np.arange(hf_src.nx, dtype=np.int64) ## arange index vector, doesnt get touched!
|
|
1899
|
+
yi = np.arange(hf_src.ny, dtype=np.int64)
|
|
1900
|
+
zi = np.arange(hf_src.nz, dtype=np.int64)
|
|
1901
|
+
ti = np.arange(hf_src.nt, dtype=np.int64)
|
|
1902
|
+
|
|
1903
|
+
xfi = np.arange(hf_src.nx, dtype=np.int64) ## gets clipped depending on x/y/z/t_min/max opts
|
|
1904
|
+
yfi = np.arange(hf_src.ny, dtype=np.int64)
|
|
1905
|
+
zfi = np.arange(hf_src.nz, dtype=np.int64)
|
|
1906
|
+
tfi = np.arange(hf_src.nt, dtype=np.int64)
|
|
1907
|
+
|
|
1908
|
+
# === total bounds clip (coordinate index) --> supports negative indexing!
|
|
1909
|
+
|
|
1910
|
+
if True: ## code folding
|
|
1911
|
+
|
|
1912
|
+
if (xi_min is not None):
|
|
1913
|
+
xfi_ = []
|
|
1914
|
+
if verbose:
|
|
1915
|
+
if (xi_min<0):
|
|
1916
|
+
even_print('xi_min', '%i / %i'%(xi_min,xi[xi_min]))
|
|
1917
|
+
else:
|
|
1918
|
+
even_print('xi_min', '%i'%(xi_min,))
|
|
1919
|
+
for c in xfi:
|
|
1920
|
+
if (xi_min<0) and (c>=(hf_src.nx+xi_min)):
|
|
1921
|
+
xfi_.append(c)
|
|
1922
|
+
elif (xi_min>=0) and (c>=xi_min):
|
|
1923
|
+
xfi_.append(c)
|
|
1924
|
+
xfi=np.array(xfi_, dtype=np.int64)
|
|
1925
|
+
else:
|
|
1926
|
+
xi_min = 0
|
|
1927
|
+
|
|
1928
|
+
if (xi_max is not None):
|
|
1929
|
+
xfi_ = []
|
|
1930
|
+
if verbose:
|
|
1931
|
+
if (xi_max<0):
|
|
1932
|
+
even_print('xi_max', '%i / %i'%(xi_max,xi[xi_max]))
|
|
1933
|
+
else:
|
|
1934
|
+
even_print('xi_max', '%i'%(xi_max,))
|
|
1935
|
+
for c in xfi:
|
|
1936
|
+
if (xi_max<0) and (c<=(hf_src.nx+xi_max)):
|
|
1937
|
+
xfi_.append(c)
|
|
1938
|
+
elif (xi_max>=0) and (c<=xi_max):
|
|
1939
|
+
xfi_.append(c)
|
|
1940
|
+
xfi=np.array(xfi_, dtype=np.int64)
|
|
1941
|
+
else:
|
|
1942
|
+
xi_max = xi[-1]
|
|
1943
|
+
|
|
1944
|
+
## check x
|
|
1945
|
+
if ((xi[xi_max]-xi[xi_min]+1)<1):
|
|
1946
|
+
raise ValueError('invalid xi range requested')
|
|
1947
|
+
if (rx>(xi[xi_max]-xi[xi_min]+1)):
|
|
1948
|
+
raise ValueError('more ranks than grid points in x')
|
|
1949
|
+
|
|
1950
|
+
if (yi_min is not None):
|
|
1951
|
+
yfi_ = []
|
|
1952
|
+
if verbose:
|
|
1953
|
+
if (yi_min<0):
|
|
1954
|
+
even_print('yi_min', '%i / %i'%(yi_min,yi[yi_min]))
|
|
1955
|
+
else:
|
|
1956
|
+
even_print('yi_min', '%i'%(yi_min,))
|
|
1957
|
+
for c in yfi:
|
|
1958
|
+
if (yi_min<0) and (c>=(hf_src.ny+yi_min)):
|
|
1959
|
+
yfi_.append(c)
|
|
1960
|
+
elif (yi_min>=0) and (c>=yi_min):
|
|
1961
|
+
yfi_.append(c)
|
|
1962
|
+
yfi=np.array(yfi_, dtype=np.int64)
|
|
1963
|
+
else:
|
|
1964
|
+
yi_min = 0
|
|
1965
|
+
|
|
1966
|
+
if (yi_max is not None):
|
|
1967
|
+
yfi_ = []
|
|
1968
|
+
if verbose:
|
|
1969
|
+
if (yi_max<0):
|
|
1970
|
+
even_print('yi_max', '%i / %i'%(yi_max,yi[yi_max]))
|
|
1971
|
+
else:
|
|
1972
|
+
even_print('yi_max', '%i'%(yi_max,))
|
|
1973
|
+
for c in yfi:
|
|
1974
|
+
if (yi_max<0) and (c<=(hf_src.ny+yi_max)):
|
|
1975
|
+
yfi_.append(c)
|
|
1976
|
+
elif (yi_max>=0) and (c<=yi_max):
|
|
1977
|
+
yfi_.append(c)
|
|
1978
|
+
yfi=np.array(yfi_, dtype=np.int64)
|
|
1979
|
+
else:
|
|
1980
|
+
yi_max = yi[-1]
|
|
1981
|
+
|
|
1982
|
+
## check y
|
|
1983
|
+
if ((yi[yi_max]-yi[yi_min]+1)<1):
|
|
1984
|
+
raise ValueError('invalid yi range requested')
|
|
1985
|
+
if (ry>(yi[yi_max]-yi[yi_min]+1)):
|
|
1986
|
+
raise ValueError('more ranks than grid points in y')
|
|
1987
|
+
|
|
1988
|
+
if (zi_min is not None):
|
|
1989
|
+
zfi_ = []
|
|
1990
|
+
if verbose:
|
|
1991
|
+
if (zi_min<0):
|
|
1992
|
+
even_print('zi_min', '%i / %i'%(zi_min,zi[zi_min]))
|
|
1993
|
+
else:
|
|
1994
|
+
even_print('zi_min', '%i'%(zi_min,))
|
|
1995
|
+
for c in zfi:
|
|
1996
|
+
if (zi_min<0) and (c>=(hf_src.nz+zi_min)):
|
|
1997
|
+
zfi_.append(c)
|
|
1998
|
+
elif (zi_min>=0) and (c>=zi_min):
|
|
1999
|
+
zfi_.append(c)
|
|
2000
|
+
zfi=np.array(zfi_, dtype=np.int64)
|
|
2001
|
+
else:
|
|
2002
|
+
zi_min = 0
|
|
2003
|
+
|
|
2004
|
+
if (zi_max is not None):
|
|
2005
|
+
zfi_ = []
|
|
2006
|
+
if verbose:
|
|
2007
|
+
if (zi_max<0):
|
|
2008
|
+
even_print('zi_max', '%i / %i'%(zi_max,zi[zi_max]))
|
|
2009
|
+
else:
|
|
2010
|
+
even_print('zi_max', '%i'%(zi_max,))
|
|
2011
|
+
for c in zfi:
|
|
2012
|
+
if (zi_max<0) and (c<=(hf_src.nz+zi_max)):
|
|
2013
|
+
zfi_.append(c)
|
|
2014
|
+
elif (zi_max>=0) and (c<=zi_max):
|
|
2015
|
+
zfi_.append(c)
|
|
2016
|
+
zfi=np.array(zfi_, dtype=np.int64)
|
|
2017
|
+
else:
|
|
2018
|
+
zi_max = zi[-1]
|
|
2019
|
+
|
|
2020
|
+
## check z
|
|
2021
|
+
if ((zi[zi_max]-zi[zi_min]+1)<1):
|
|
2022
|
+
raise ValueError('invalid zi range requested')
|
|
2023
|
+
if (rz>(zi[zi_max]-zi[zi_min]+1)):
|
|
2024
|
+
raise ValueError('more ranks than grid points in z')
|
|
2025
|
+
|
|
2026
|
+
if (ti_min is not None):
|
|
2027
|
+
tfi_ = []
|
|
2028
|
+
if verbose:
|
|
2029
|
+
if (ti_min<0):
|
|
2030
|
+
even_print('ti_min', '%i / %i'%(ti_min,ti[ti_min]))
|
|
2031
|
+
else:
|
|
2032
|
+
even_print('ti_min', '%i'%(ti_min,))
|
|
2033
|
+
for c in tfi:
|
|
2034
|
+
if (ti_min<0) and (c>=(hf_src.nt+ti_min)):
|
|
2035
|
+
tfi_.append(c)
|
|
2036
|
+
elif (ti_min>=0) and (c>=ti_min):
|
|
2037
|
+
tfi_.append(c)
|
|
2038
|
+
tfi=np.array(tfi_, dtype=np.int64)
|
|
2039
|
+
else:
|
|
2040
|
+
ti_min = 0
|
|
2041
|
+
|
|
2042
|
+
if (ti_max is not None):
|
|
2043
|
+
tfi_ = []
|
|
2044
|
+
if verbose:
|
|
2045
|
+
if (ti_max<0):
|
|
2046
|
+
even_print('ti_max', '%i / %i'%(ti_max,ti[ti_max]))
|
|
2047
|
+
else:
|
|
2048
|
+
even_print('ti_max', '%i'%(ti_max,))
|
|
2049
|
+
for c in tfi:
|
|
2050
|
+
if (ti_max<0) and (c<=(hf_src.nt+ti_max)):
|
|
2051
|
+
tfi_.append(c)
|
|
2052
|
+
elif (ti_max>=0) and (c<=ti_max):
|
|
2053
|
+
tfi_.append(c)
|
|
2054
|
+
tfi=np.array(tfi_, dtype=np.int64)
|
|
2055
|
+
else:
|
|
2056
|
+
ti_max = ti[-1]
|
|
2057
|
+
|
|
2058
|
+
## check t
|
|
2059
|
+
if ((ti[ti_max]-ti[ti_min]+1)<1):
|
|
2060
|
+
raise ValueError('invalid ti range requested')
|
|
2061
|
+
if (ct>(ti[ti_max]-ti[ti_min]+1)):
|
|
2062
|
+
raise ValueError('more chunks than timesteps')
|
|
2063
|
+
|
|
2064
|
+
# === 3D/4D communicator
|
|
2065
|
+
|
|
2066
|
+
comm4d = hf_src.comm.Create_cart(dims=[rx,ry,rz], periods=[False,False,False], reorder=False)
|
|
2067
|
+
t4d = comm4d.Get_coords(rank)
|
|
2068
|
+
|
|
2069
|
+
rxl_ = np.array_split(xfi,rx)
|
|
2070
|
+
ryl_ = np.array_split(yfi,ry)
|
|
2071
|
+
rzl_ = np.array_split(zfi,rz)
|
|
2072
|
+
#rtl_ = np.array_split(tfi,rt)
|
|
2073
|
+
|
|
2074
|
+
rxl = [[b[0],b[-1]+1] for b in rxl_ ]
|
|
2075
|
+
ryl = [[b[0],b[-1]+1] for b in ryl_ ]
|
|
2076
|
+
rzl = [[b[0],b[-1]+1] for b in rzl_ ]
|
|
2077
|
+
#rtl = [[b[0],b[-1]+1] for b in rtl_ ]
|
|
2078
|
+
|
|
2079
|
+
## the rank-local bounds for READ --> takes into acct clip but not step!
|
|
2080
|
+
rx1, rx2 = rxl[t4d[0]] #; nxr = rx2 - rx1
|
|
2081
|
+
ry1, ry2 = ryl[t4d[1]] #; nyr = ry2 - ry1
|
|
2082
|
+
rz1, rz2 = rzl[t4d[2]] #; nzr = rz2 - rz1
|
|
2083
|
+
#rt1, rt2 = rtl[t4d[3]] #; ntr = rt2 - rt1
|
|
2084
|
+
|
|
2085
|
+
## the global dim sizes for READ
|
|
2086
|
+
nx_read = xfi.shape[0]
|
|
2087
|
+
ny_read = yfi.shape[0]
|
|
2088
|
+
nz_read = zfi.shape[0]
|
|
2089
|
+
|
|
2090
|
+
# === global step
|
|
2091
|
+
|
|
2092
|
+
## take every nth index (of the already bounds-clipped) index-to-take vector
|
|
2093
|
+
xfi = np.copy(xfi[::xi_step])
|
|
2094
|
+
yfi = np.copy(yfi[::yi_step])
|
|
2095
|
+
zfi = np.copy(zfi[::zi_step])
|
|
2096
|
+
|
|
2097
|
+
## the global dim sizes for WRITE
|
|
2098
|
+
nx = xfi.shape[0]
|
|
2099
|
+
ny = yfi.shape[0]
|
|
2100
|
+
nz = zfi.shape[0]
|
|
2101
|
+
|
|
2102
|
+
# ===
|
|
2103
|
+
|
|
2104
|
+
## grid for target file (rectilinear case)
|
|
2105
|
+
x = np.copy(x[xfi]) ## target file
|
|
2106
|
+
y = np.copy(y[yfi])
|
|
2107
|
+
z = np.copy(z[zfi])
|
|
2108
|
+
t = np.copy(t[tfi])
|
|
2109
|
+
|
|
2110
|
+
nx = x.shape[0] ## target file
|
|
2111
|
+
ny = y.shape[0]
|
|
2112
|
+
nz = z.shape[0]
|
|
2113
|
+
nt = t.shape[0]
|
|
2114
|
+
|
|
2115
|
+
if verbose:
|
|
2116
|
+
even_print('fn_rgd_tgt' , fn_rgd_tgt )
|
|
2117
|
+
even_print('nx' , '%i'%nx )
|
|
2118
|
+
even_print('ny' , '%i'%ny )
|
|
2119
|
+
even_print('nz' , '%i'%nz )
|
|
2120
|
+
even_print('nt' , '%i'%nt )
|
|
2121
|
+
print(72*'-')
|
|
2122
|
+
|
|
2123
|
+
## REPLACE coordinate dimension arrays in target file
|
|
2124
|
+
if ('dims/x' in hf_tgt):
|
|
2125
|
+
del hf_tgt['dims/x']
|
|
2126
|
+
hf_tgt.create_dataset('dims/x', data=x, dtype=np.float64, chunks=None)
|
|
2127
|
+
if ('dims/y' in hf_tgt):
|
|
2128
|
+
del hf_tgt['dims/y']
|
|
2129
|
+
hf_tgt.create_dataset('dims/y', data=y, dtype=np.float64, chunks=None)
|
|
2130
|
+
if ('dims/z' in hf_tgt):
|
|
2131
|
+
del hf_tgt['dims/z']
|
|
2132
|
+
hf_tgt.create_dataset('dims/z', data=z, dtype=np.float64, chunks=None)
|
|
2133
|
+
if ('dims/t' in hf_tgt):
|
|
2134
|
+
del hf_tgt['dims/t']
|
|
2135
|
+
hf_tgt.create_dataset('dims/t', data=t, dtype=np.float64, chunks=None)
|
|
2136
|
+
|
|
2137
|
+
# ## write filter index arrays to file
|
|
2138
|
+
# if ('filters/xfi' in hf_tgt):
|
|
2139
|
+
# del hf_tgt['filters/xfi']
|
|
2140
|
+
# hf_tgt.create_dataset('filters/xfi', data=xfi, dtype=np.int64, chunks=None)
|
|
2141
|
+
# if ('filters/yfi' in hf_tgt):
|
|
2142
|
+
# del hf_tgt['filters/yfi']
|
|
2143
|
+
# hf_tgt.create_dataset('filters/yfi', data=yfi, dtype=np.int64, chunks=None)
|
|
2144
|
+
# if ('filters/zfi' in hf_tgt):
|
|
2145
|
+
# del hf_tgt['filters/zfi']
|
|
2146
|
+
# hf_tgt.create_dataset('filters/zfi', data=zfi, dtype=np.int64, chunks=None)
|
|
2147
|
+
|
|
2148
|
+
# === bounds for outfile WRITE
|
|
2149
|
+
|
|
2150
|
+
xiw = np.array( [ i for i in xfi if all([(i>=rx1),(i<rx2)]) ], dtype=np.int32 ) ## the global indices in my local rank, taking into acct clip AND step
|
|
2151
|
+
nxiw = xiw.shape[0]
|
|
2152
|
+
xiw_off = len([ i for i in xfi if (i<rx1) ]) ## this rank's left offset in the OUTFILE context
|
|
2153
|
+
rx1w = xiw_off
|
|
2154
|
+
rx2w = xiw_off + nxiw
|
|
2155
|
+
|
|
2156
|
+
yiw = np.array( [ i for i in yfi if all([(i>=ry1),(i<ry2)]) ], dtype=np.int32 )
|
|
2157
|
+
nyiw = yiw.shape[0]
|
|
2158
|
+
yiw_off = len([ i for i in yfi if (i<ry1) ])
|
|
2159
|
+
ry1w = yiw_off
|
|
2160
|
+
ry2w = yiw_off + nyiw
|
|
2161
|
+
|
|
2162
|
+
ziw = np.array( [ i for i in zfi if all([(i>=rz1),(i<rz2)]) ], dtype=np.int32 )
|
|
2163
|
+
nziw = ziw.shape[0]
|
|
2164
|
+
ziw_off = len([ i for i in zfi if (i<rz1) ])
|
|
2165
|
+
rz1w = ziw_off
|
|
2166
|
+
rz2w = ziw_off + nziw
|
|
2167
|
+
|
|
2168
|
+
## xiw,yiw,ziw are used to 'filter' the rank-local data that is read in
|
|
2169
|
+
## xiw,yiw,ziw are currently in the global context, so we need to subtract off the left READ bound
|
|
2170
|
+
## which is NOT just the min xiw
|
|
2171
|
+
xiw -= rx1
|
|
2172
|
+
yiw -= ry1
|
|
2173
|
+
ziw -= rz1
|
|
2174
|
+
|
|
2175
|
+
# ===
|
|
2176
|
+
|
|
2177
|
+
## time 'chunks' split (number of timesteps to read / write at a time)
|
|
2178
|
+
ctl_ = np.array_split(tfi,ct)
|
|
2179
|
+
ctl = [[b[0],b[-1]+1] for b in ctl_ ]
|
|
2180
|
+
|
|
2181
|
+
shape = (nt,nz,ny,nx) ## target
|
|
2182
|
+
hf_tgt.scalars = []
|
|
2183
|
+
|
|
2184
|
+
# ======================================================
|
|
2185
|
+
# initialize
|
|
2186
|
+
# ======================================================
|
|
2187
|
+
|
|
2188
|
+
if verbose:
|
|
2189
|
+
progress_bar = tqdm(
|
|
2190
|
+
total=len( [ s for s in hf_src.scalars if (s in scalars) ] ),
|
|
2191
|
+
ncols=100,
|
|
2192
|
+
desc='initialize dsets',
|
|
2193
|
+
leave=True,
|
|
2194
|
+
file=sys.stdout,
|
|
2195
|
+
mininterval=0.1,
|
|
2196
|
+
smoothing=0.,
|
|
2197
|
+
#bar_format="\033[B{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}\033[A\n\b",
|
|
2198
|
+
bar_format="{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}",
|
|
2199
|
+
ascii="░█",
|
|
2200
|
+
colour='#FF6600',
|
|
2201
|
+
)
|
|
2202
|
+
|
|
2203
|
+
## initialize scalar datasets
|
|
2204
|
+
t_start = timeit.default_timer()
|
|
2205
|
+
for scalar in hf_src.scalars:
|
|
2206
|
+
|
|
2207
|
+
dtype = hf_src.scalars_dtypes_dict[scalar]
|
|
2208
|
+
chunks = h5_chunk_sizer(nxi=shape, constraint=chunk_constraint, size_kb=chunk_kb, base=chunk_base, itemsize=dtype.itemsize)
|
|
2209
|
+
data_gb = dtype.itemsize * nx * ny * nz * nt / 1024**3
|
|
2210
|
+
|
|
2211
|
+
if (scalar in scalars):
|
|
2212
|
+
if verbose:
|
|
2213
|
+
tqdm.write(even_print(f'initializing data/{scalar}', f'{data_gb:0.2f} [GB]', s=True))
|
|
2214
|
+
|
|
2215
|
+
dset = hf_tgt.create_dataset(
|
|
2216
|
+
'data/%s'%scalar,
|
|
2217
|
+
shape=shape,
|
|
2218
|
+
dtype=dtype,
|
|
2219
|
+
chunks=chunks,
|
|
2220
|
+
)
|
|
2221
|
+
hf_tgt.scalars.append(scalar)
|
|
2222
|
+
|
|
2223
|
+
chunk_kb_ = np.prod(dset.chunks)*dset.dtype.itemsize / 1024.
|
|
2224
|
+
if verbose:
|
|
2225
|
+
tqdm.write(even_print('chunk shape (t,z,y,x)', str(dset.chunks), s=True))
|
|
2226
|
+
tqdm.write(even_print('chunk size', f'{int(round(chunk_kb_)):d} [KB]', s=True))
|
|
2227
|
+
|
|
2228
|
+
if verbose:
|
|
2229
|
+
progress_bar.update()
|
|
2230
|
+
|
|
2231
|
+
hf_tgt.comm.Barrier()
|
|
2232
|
+
if verbose:
|
|
2233
|
+
progress_bar.close()
|
|
2234
|
+
|
|
2235
|
+
t_initialize = timeit.default_timer() - t_start
|
|
2236
|
+
if verbose:
|
|
2237
|
+
print(72*'-')
|
|
2238
|
+
even_print('time initialize',format_time_string(t_initialize))
|
|
2239
|
+
print(72*'-')
|
|
2240
|
+
|
|
2241
|
+
# ===
|
|
2242
|
+
|
|
2243
|
+
hf_tgt.n_scalars = len(hf_tgt.scalars)
|
|
2244
|
+
|
|
2245
|
+
# ===
|
|
2246
|
+
|
|
2247
|
+
data_gb_read = 0.
|
|
2248
|
+
data_gb_write = 0.
|
|
2249
|
+
t_read = 0.
|
|
2250
|
+
t_write = 0.
|
|
2251
|
+
|
|
2252
|
+
if verbose:
|
|
2253
|
+
progress_bar = tqdm(
|
|
2254
|
+
total=len(ctl)*hf_tgt.n_scalars,
|
|
2255
|
+
ncols=100,
|
|
2256
|
+
desc='copy',
|
|
2257
|
+
leave=True,
|
|
2258
|
+
file=sys.stdout,
|
|
2259
|
+
mininterval=0.1,
|
|
2260
|
+
smoothing=0.,
|
|
2261
|
+
#bar_format="\033[B{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}\033[A\n\b",
|
|
2262
|
+
bar_format="{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}",
|
|
2263
|
+
ascii="░█",
|
|
2264
|
+
colour='#FF6600',
|
|
2265
|
+
)
|
|
2266
|
+
|
|
2267
|
+
for scalar in hf_tgt.scalars:
|
|
2268
|
+
dset_src = hf_src[f'data/{scalar}']
|
|
2269
|
+
dset_tgt = hf_tgt[f'data/{scalar}']
|
|
2270
|
+
|
|
2271
|
+
dtype = dset_src.dtype
|
|
2272
|
+
|
|
2273
|
+
for ctl_ in ctl:
|
|
2274
|
+
|
|
2275
|
+
ct1, ct2 = ctl_
|
|
2276
|
+
|
|
2277
|
+
ct1_ = ct1 - ti[ti_min] ## coords in target file
|
|
2278
|
+
ct2_ = ct2 - ti[ti_min]
|
|
2279
|
+
|
|
2280
|
+
## read
|
|
2281
|
+
hf_src.comm.Barrier()
|
|
2282
|
+
t_start = timeit.default_timer()
|
|
2283
|
+
with dset_src.collective:
|
|
2284
|
+
data = dset_src[ct1:ct2,rz1:rz2,ry1:ry2,rx1:rx2].T
|
|
2285
|
+
hf_src.comm.Barrier()
|
|
2286
|
+
t_delta = timeit.default_timer() - t_start
|
|
2287
|
+
data_gb = dtype.itemsize * nx_read * ny_read * nz_read * (ct2-ct1) / 1024**3
|
|
2288
|
+
|
|
2289
|
+
t_read += t_delta
|
|
2290
|
+
data_gb_read += data_gb
|
|
2291
|
+
|
|
2292
|
+
if verbose:
|
|
2293
|
+
tqdm.write(even_print(f'read: {scalar}', f'{data_gb:0.3f} [GB] {t_delta:0.3f} [s] {data_gb/t_delta:0.3f} [GB/s]', s=True))
|
|
2294
|
+
|
|
2295
|
+
try:
|
|
2296
|
+
data_out = np.copy( data[ np.ix_(xiw,yiw,ziw) ] )
|
|
2297
|
+
#data_out = np.copy( data[ xiw[:,np.newaxis,np.newaxis], yiw[np.newaxis,:,np.newaxis], ziw[np.newaxis,np.newaxis,:] ] )
|
|
2298
|
+
except Exception:
|
|
2299
|
+
print('rgd.copy() : error in xiw,yiw,ziw')
|
|
2300
|
+
MPI.COMM_WORLD.Abort(1)
|
|
2301
|
+
|
|
2302
|
+
## write
|
|
2303
|
+
hf_tgt.comm.Barrier()
|
|
2304
|
+
t_start = timeit.default_timer()
|
|
2305
|
+
with dset_tgt.collective:
|
|
2306
|
+
dset_tgt[ct1_:ct2_,rz1w:rz2w,ry1w:ry2w,rx1w:rx2w] = data_out.T
|
|
2307
|
+
hf_tgt.flush()
|
|
2308
|
+
hf_tgt.comm.Barrier()
|
|
2309
|
+
t_delta = timeit.default_timer() - t_start
|
|
2310
|
+
data_gb = dtype.itemsize * nx*ny*nz * (ct2-ct1) / 1024**3
|
|
2311
|
+
|
|
2312
|
+
t_write += t_delta
|
|
2313
|
+
data_gb_write += data_gb
|
|
2314
|
+
|
|
2315
|
+
if verbose:
|
|
2316
|
+
tqdm.write(even_print(f'write: {scalar}', f'{data_gb:0.3f} [GB] {t_delta:0.3f} [s] {data_gb/t_delta:0.3f} [GB/s]', s=True))
|
|
2317
|
+
|
|
2318
|
+
if verbose:
|
|
2319
|
+
progress_bar.update()
|
|
2320
|
+
|
|
2321
|
+
if verbose:
|
|
2322
|
+
progress_bar.close()
|
|
2323
|
+
|
|
2324
|
+
if verbose: print(72*'-')
|
|
2325
|
+
if verbose: even_print('time initialize',format_time_string(t_initialize))
|
|
2326
|
+
if verbose: even_print('time read',format_time_string(t_read))
|
|
2327
|
+
if verbose: even_print('time write',format_time_string(t_write))
|
|
2328
|
+
if verbose: even_print('read total avg', f'{data_gb_read:0.2f} [GB] {t_read:0.2f} [s] {(data_gb_read/t_read):0.3f} [GB/s]')
|
|
2329
|
+
if verbose: even_print('write total avg', f'{data_gb_write:0.2f} [GB] {t_write:0.2f} [s] {(data_gb_write/t_write):0.3f} [GB/s]')
|
|
2330
|
+
if verbose: print(72*'-')
|
|
2331
|
+
if verbose: even_print( os.path.basename(fn_rgd_src), f'{(os.path.getsize(fn_rgd_src)/1024**3):0.2f} [GB]')
|
|
2332
|
+
if verbose: even_print( os.path.basename(fn_rgd_tgt), f'{(os.path.getsize(fn_rgd_tgt)/1024**3):0.2f} [GB]')
|
|
2333
|
+
if verbose: print(72*'-')
|
|
2334
|
+
if verbose: print('total time : rgd.copy() : %s'%format_time_string((timeit.default_timer() - t_start_func)))
|
|
2335
|
+
if verbose: print(72*'-')
|
|
2336
|
+
return
|
|
2337
|
+
|
|
2338
|
+
def make_xdmf(self, **kwargs):
    '''
    Generate an XDMF/XMF2 from RGD for processing with Paraview
    -----
    https://www.xdmf.org/index.php/XDMF_Model_and_Format

    Writes a sidecar file '<root>.xmf2' next to the HDF5 file (same directory,
    same root name). Only MPI rank 0 writes; all other ranks stay silent and
    join at the final barrier.

    Keyword arguments
    -----------------
    makeVectors : bool (default True)
        also declare JOIN()-assembled vector attributes (velocity, vorticity)
        when all three components exist as scalars
    makeTensors : bool (default True)
        placeholder for 3x3 tensor attributes -- currently not implemented
        (the branch below is a no-op / TODO)
    '''

    ## only rank 0 prints and writes the XDMF file
    if (self.rank==0):
        verbose = True
    else:
        verbose = False

    makeVectors = kwargs.get('makeVectors',True) ## write vectors (e.g. velocity, vorticity) to XDMF
    makeTensors = kwargs.get('makeTensors',True) ## write 3x3 tensors (e.g. stress, strain) to XDMF

    ## XDMF file path: same directory and root name as the HDF5 file, '.xmf2' extension
    fname_path = os.path.dirname(self.fname)
    fname_base = os.path.basename(self.fname)
    fname_root, fname_ext = os.path.splitext(fname_base)
    fname_xdmf_base = fname_root+'.xmf2'
    fname_xdmf = os.path.join(fname_path, fname_xdmf_base)

    if verbose: print('\n'+'rgd.make_xdmf()'+'\n'+72*'-')

    dataset_precision_dict = {} ## holds dtype.itemsize ints i.e. 4,8
    dataset_numbertype_dict = {} ## holds string description of dtypes i.e. 'Float','Integer'

    # === 1D coordinate dimension vectors --> get dtype.name
    for scalar in ['x','y','z']:
        if ('dims/'+scalar in self):
            data = self['dims/'+scalar]
            dataset_precision_dict[scalar] = data.dtype.itemsize
            if (data.dtype.name=='float32') or (data.dtype.name=='float64'):
                dataset_numbertype_dict[scalar] = 'Float'
            elif (data.dtype.name=='int8') or (data.dtype.name=='int16') or (data.dtype.name=='int32') or (data.dtype.name=='int64'):
                dataset_numbertype_dict[scalar] = 'Integer'
            else:
                ## NOTE(review): this branch raises ValueError while the scalar-dataset
                ## loop below raises TypeError for the same condition -- consider unifying
                raise ValueError('dtype not recognized, please update script accordingly')

    # scalar names dict
    # --> labels for Paraview could be customized (e.g. units could be added) using a dict
    # --> the block below shows one such example dict, though it is currently inactive

    if False:
        units = 'dimless'
        if (units=='SI') or (units=='si'): ## m,s,kg,K
            scalar_names = {'x':'x [m]',
                            'y':'y [m]',
                            'z':'z [m]',
                            'u':'u [m/s]',
                            'v':'v [m/s]',
                            'w':'w [m/s]',
                            'T':'T [K]',
                            'rho':'rho [kg/m^3]',
                            'p':'p [Pa]'}
        elif (units=='dimless') or (units=='dimensionless'):
            scalar_names = {'x':'x [dimless]',
                            'y':'y [dimless]',
                            'z':'z [dimless]',
                            'u':'u [dimless]',
                            'v':'v [dimless]',
                            'w':'w [dimless]',
                            'T':'T [dimless]',
                            'rho':'rho [dimless]',
                            'p':'p [dimless]'}
        else:
            raise ValueError('choice of units not recognized : %s --> options are : %s / %s'%(units,'SI','dimless'))
    else:
        scalar_names = {} ## dummy/empty

    ## refresh header
    self.get_header(verbose=False)

    ## record precision / numbertype for every scalar dataset under data/
    for scalar in self.scalars:
        data = self['data/%s'%scalar]

        dataset_precision_dict[scalar] = data.dtype.itemsize
        ## fixed-width summary line: itemsize, dtype name, byteorder
        txt = '%s%s%s%s%s'%(data.dtype.itemsize, ' '*(4-len(str(data.dtype.itemsize))), data.dtype.name, ' '*(10-len(str(data.dtype.name))), data.dtype.byteorder)
        if verbose: even_print(scalar, txt)

        if (data.dtype.name=='float32') or (data.dtype.name=='float64'):
            dataset_numbertype_dict[scalar] = 'Float'
        elif (data.dtype.name=='int8') or (data.dtype.name=='int16') or (data.dtype.name=='int32') or (data.dtype.name=='int64'):
            dataset_numbertype_dict[scalar] = 'Integer'
        else:
            raise TypeError('dtype not recognized, please update script accordingly')

    if verbose: print(72*'-')

    # === write to .xdmf/.xmf2 file
    if (self.rank==0):

        if not os.path.isfile(fname_xdmf): ## if doesnt exist...
            Path(fname_xdmf).touch() ## touch XDMF file
            perms_h5 = oct(os.stat(self.fname).st_mode)[-3:] ## get permissions of RGD file
            os.chmod(fname_xdmf, int(perms_h5, base=8)) ## change permissions of XDMF file

        ## newline='\n' forces Unix line endings regardless of platform
        #with open(fname_xdmf,'w') as xdmf:
        with io.open(fname_xdmf,'w',newline='\n') as xdmf:

            xdmf_str='''
                     <?xml version="1.0" encoding="utf-8"?>
                     <!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>
                     <Xdmf xmlns:xi="http://www.w3.org/2001/XInclude" Version="2.0">
                       <Domain>
                     '''

            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 0*' '))

            ## Dimensions can also be NumberOfElements
            xdmf_str=f'''
                      <Topology TopologyType="3DRectMesh" NumberOfElements="{self.nz:d} {self.ny:d} {self.nx:d}"/>
                      <Geometry GeometryType="VxVyVz">
                        <DataItem Dimensions="{self.nx:d}" NumberType="{dataset_numbertype_dict['x']}" Precision="{dataset_precision_dict['x']:d}" Format="HDF">
                          {fname_base}:/dims/{'x'}
                        </DataItem>
                        <DataItem Dimensions="{self.ny:d}" NumberType="{dataset_numbertype_dict['y']}" Precision="{dataset_precision_dict['y']:d}" Format="HDF">
                          {fname_base}:/dims/{'y'}
                        </DataItem>
                        <DataItem Dimensions="{self.nz:d}" NumberType="{dataset_numbertype_dict['z']}" Precision="{dataset_precision_dict['z']:d}" Format="HDF">
                          {fname_base}:/dims/{'z'}
                        </DataItem>
                      </Geometry>
                      '''

            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 4*' '))

            # ===

            xdmf_str='''
                     <!-- ==================== time series ==================== -->
                     '''

            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 4*' '))

            # === the time series

            xdmf_str='''
                     <Grid Name="TimeSeries" GridType="Collection" CollectionType="Temporal">
                     '''

            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 4*' '))

            ## one Uniform <Grid> per timestep, all referencing the shared Topology/Geometry
            for ti in range(len(self.t)):

                dset_name = 'ts_%08d'%ti

                xdmf_str='''
                         <!-- ============================================================ -->
                         '''

                xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 6*' '))

                # =====

                xdmf_str=f'''
                          <Grid Name="{dset_name}" GridType="Uniform">
                            <Time TimeType="Single" Value="{self.t[ti]:0.8E}"/>
                            <Topology Reference="/Xdmf/Domain/Topology[1]" />
                            <Geometry Reference="/Xdmf/Domain/Geometry[1]" />
                          '''

                xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 6*' '))

                # ===== .xdmf : <Grid> per scalar

                for scalar in self.scalars:

                    dset_hf_path = 'data/%s'%scalar

                    ## get optional 'label' for Paraview (currently inactive)
                    if scalar in scalar_names:
                        scalar_name = scalar_names[scalar]
                    else:
                        scalar_name = scalar

                    ## HyperSlab selects one timestep [ti,0,0,0] with stride 1
                    ## out of the 4D (t,z,y,x) dataset
                    xdmf_str=f'''
                              <!-- ===== scalar : {scalar} ===== -->
                              <Attribute Name="{scalar_name}" AttributeType="Scalar" Center="Node">
                                <DataItem ItemType="HyperSlab" Dimensions="{self.nz:d} {self.ny:d} {self.nx:d}" Type="HyperSlab">
                                  <DataItem Dimensions="3 4" NumberType="Integer" Format="XML">
                                    {ti:<6d} {0:<6d} {0:<6d} {0:<6d}
                                    {1:<6d} {1:<6d} {1:<6d} {1:<6d}
                                    {1:<6d} {self.nz:<6d} {self.ny:<6d} {self.nx:<6d}
                                  </DataItem>
                                  <DataItem Dimensions="{self.nt:d} {self.nz:d} {self.ny:d} {self.nx:d}" NumberType="{dataset_numbertype_dict[scalar]}" Precision="{dataset_precision_dict[scalar]:d}" Format="HDF">
                                    {fname_base}:/{dset_hf_path}
                                  </DataItem>
                                </DataItem>
                              </Attribute>
                              '''

                    xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 8*' '))

                if makeVectors:

                    # === .xdmf : <Grid> per vector : velocity vector

                    if ('u' in self.scalars) and ('v' in self.scalars) and ('w' in self.scalars):

                        scalar_name = 'velocity'
                        dset_hf_path_i = 'data/u'
                        dset_hf_path_j = 'data/v'
                        dset_hf_path_k = 'data/w'

                        ## three per-component HyperSlabs joined into one vector attribute
                        xdmf_str = f'''
                                    <!-- ===== vector : {scalar_name} ===== -->
                                    <Attribute Name="{scalar_name}" AttributeType="Vector" Center="Node">
                                      <DataItem Dimensions="{self.nz:d} {self.ny:d} {self.nx:d} {3:d}" Function="JOIN($0, $1, $2)" ItemType="Function">
                                        <!-- 1 -->
                                        <DataItem ItemType="HyperSlab" Dimensions="{self.nz:d} {self.ny:d} {self.nx:d}" Type="HyperSlab">
                                          <DataItem Dimensions="3 4" NumberType="Integer" Format="XML">
                                            {ti:<6d} {0:<6d} {0:<6d} {0:<6d}
                                            {1:<6d} {1:<6d} {1:<6d} {1:<6d}
                                            {1:<6d} {self.nz:<6d} {self.ny:<6d} {self.nx:<6d}
                                          </DataItem>
                                          <DataItem Dimensions="{self.nt:d} {self.nz:d} {self.ny:d} {self.nx:d}" NumberType="{dataset_numbertype_dict['u']}" Precision="{dataset_precision_dict['u']:d}" Format="HDF">
                                            {fname_base}:/{dset_hf_path_i}
                                          </DataItem>
                                        </DataItem>
                                        <!-- 2 -->
                                        <DataItem ItemType="HyperSlab" Dimensions="{self.nz:d} {self.ny:d} {self.nx:d}" Type="HyperSlab">
                                          <DataItem Dimensions="3 4" NumberType="Integer" Format="XML">
                                            {ti:<6d} {0:<6d} {0:<6d} {0:<6d}
                                            {1:<6d} {1:<6d} {1:<6d} {1:<6d}
                                            {1:<6d} {self.nz:<6d} {self.ny:<6d} {self.nx:<6d}
                                          </DataItem>
                                          <DataItem Dimensions="{self.nt:d} {self.nz:d} {self.ny:d} {self.nx:d}" NumberType="{dataset_numbertype_dict['v']}" Precision="{dataset_precision_dict['v']:d}" Format="HDF">
                                            {fname_base}:/{dset_hf_path_j}
                                          </DataItem>
                                        </DataItem>
                                        <!-- 3 -->
                                        <DataItem ItemType="HyperSlab" Dimensions="{self.nz:d} {self.ny:d} {self.nx:d}" Type="HyperSlab">
                                          <DataItem Dimensions="3 4" NumberType="Integer" Format="XML">
                                            {ti:<6d} {0:<6d} {0:<6d} {0:<6d}
                                            {1:<6d} {1:<6d} {1:<6d} {1:<6d}
                                            {1:<6d} {self.nz:<6d} {self.ny:<6d} {self.nx:<6d}
                                          </DataItem>
                                          <DataItem Dimensions="{self.nt:d} {self.nz:d} {self.ny:d} {self.nx:d}" NumberType="{dataset_numbertype_dict['w']}" Precision="{dataset_precision_dict['w']:d}" Format="HDF">
                                            {fname_base}:/{dset_hf_path_k}
                                          </DataItem>
                                        </DataItem>
                                        <!-- - -->
                                      </DataItem>
                                    </Attribute>
                                    '''

                        xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 8*' '))

                    # === .xdmf : <Grid> per vector : vorticity vector

                    if ('vort_x' in self.scalars) and ('vort_y' in self.scalars) and ('vort_z' in self.scalars):

                        scalar_name = 'vorticity'
                        dset_hf_path_i = 'data/vort_x'
                        dset_hf_path_j = 'data/vort_y'
                        dset_hf_path_k = 'data/vort_z'

                        xdmf_str = f'''
                                    <!-- ===== vector : {scalar_name} ===== -->
                                    <Attribute Name="{scalar_name}" AttributeType="Vector" Center="Node">
                                      <DataItem Dimensions="{self.nz:d} {self.ny:d} {self.nx:d} {3:d}" Function="JOIN($0, $1, $2)" ItemType="Function">
                                        <!-- 1 -->
                                        <DataItem ItemType="HyperSlab" Dimensions="{self.nz:d} {self.ny:d} {self.nx:d}" Type="HyperSlab">
                                          <DataItem Dimensions="3 4" NumberType="Integer" Format="XML">
                                            {ti:<6d} {0:<6d} {0:<6d} {0:<6d}
                                            {1:<6d} {1:<6d} {1:<6d} {1:<6d}
                                            {1:<6d} {self.nz:<6d} {self.ny:<6d} {self.nx:<6d}
                                          </DataItem>
                                          <DataItem Dimensions="{self.nt:d} {self.nz:d} {self.ny:d} {self.nx:d}" NumberType="{dataset_numbertype_dict['vort_x']}" Precision="{dataset_precision_dict['vort_x']:d}" Format="HDF">
                                            {fname_base}:/{dset_hf_path_i}
                                          </DataItem>
                                        </DataItem>
                                        <!-- 2 -->
                                        <DataItem ItemType="HyperSlab" Dimensions="{self.nz:d} {self.ny:d} {self.nx:d}" Type="HyperSlab">
                                          <DataItem Dimensions="3 4" NumberType="Integer" Format="XML">
                                            {ti:<6d} {0:<6d} {0:<6d} {0:<6d}
                                            {1:<6d} {1:<6d} {1:<6d} {1:<6d}
                                            {1:<6d} {self.nz:<6d} {self.ny:<6d} {self.nx:<6d}
                                          </DataItem>
                                          <DataItem Dimensions="{self.nt:d} {self.nz:d} {self.ny:d} {self.nx:d}" NumberType="{dataset_numbertype_dict['vort_y']}" Precision="{dataset_precision_dict['vort_y']:d}" Format="HDF">
                                            {fname_base}:/{dset_hf_path_j}
                                          </DataItem>
                                        </DataItem>
                                        <!-- 3 -->
                                        <DataItem ItemType="HyperSlab" Dimensions="{self.nz:d} {self.ny:d} {self.nx:d}" Type="HyperSlab">
                                          <DataItem Dimensions="3 4" NumberType="Integer" Format="XML">
                                            {ti:<6d} {0:<6d} {0:<6d} {0:<6d}
                                            {1:<6d} {1:<6d} {1:<6d} {1:<6d}
                                            {1:<6d} {self.nz:<6d} {self.ny:<6d} {self.nx:<6d}
                                          </DataItem>
                                          <DataItem Dimensions="{self.nt:d} {self.nz:d} {self.ny:d} {self.nx:d}" NumberType="{dataset_numbertype_dict['vort_z']}" Precision="{dataset_precision_dict['vort_z']:d}" Format="HDF">
                                            {fname_base}:/{dset_hf_path_k}
                                          </DataItem>
                                        </DataItem>
                                        <!-- - -->
                                      </DataItem>
                                    </Attribute>
                                    '''

                        xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 8*' '))

                if makeTensors:
                    ## NOTE(review): tensor output (e.g. velocity-gradient tensor) is not
                    ## yet implemented -- the condition is checked but nothing is written
                    if all([('dudx' in self.scalars),('dvdx' in self.scalars),('dwdx' in self.scalars),
                            ('dudy' in self.scalars),('dvdy' in self.scalars),('dwdy' in self.scalars),
                            ('dudz' in self.scalars),('dvdz' in self.scalars),('dwdz' in self.scalars)]):
                        pass
                    pass ## TODO
                    pass

                # === .xdmf : end Grid for this timestep

                xdmf_str='''
                         </Grid>
                         '''
                xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 6*' '))

            # ===

            xdmf_str='''
                         </Grid>
                       </Domain>
                     </Xdmf>
                     '''
            xdmf.write(textwrap.indent(textwrap.dedent(xdmf_str.strip('\n')), 0*' '))

    ## all ranks sync before returning so no rank proceeds while rank 0 is still writing
    if self.usingmpi:
        self.comm.Barrier()
    if verbose: print('--w-> %s'%fname_xdmf_base)
    return
|
|
2667
|
+
|
|
2668
|
+
# ==================================================================
|
|
2669
|
+
# External attachments
|
|
2670
|
+
# ==================================================================
|
|
2671
|
+
|
|
2672
|
+
def calc_mean(self, **kwargs):
    '''
    Thin wrapper: delegate to the module-level _calc_mean() implementation,
    passing this RGD instance and all keyword arguments through unchanged.
    '''
    return _calc_mean(self, **kwargs)
|
|
2674
|
+
|
|
2675
|
+
# === [x] plane
|
|
2676
|
+
|
|
2677
|
+
def add_mean_dimensional_data_xpln(self, **kwargs):
    '''
    Thin wrapper: delegate to the module-level _add_mean_dimensional_data_xpln()
    ([x]-plane routine), passing this instance and all kwargs through unchanged.
    '''
    return _add_mean_dimensional_data_xpln(self, **kwargs)
|
|
2679
|
+
|
|
2680
|
+
def calc_turb_cospectrum_xpln(self, **kwargs):
    '''
    Thin wrapper: delegate to the module-level _calc_turb_cospectrum_xpln()
    ([x]-plane routine), passing this instance and all kwargs through unchanged.
    '''
    return _calc_turb_cospectrum_xpln(self, **kwargs)
|
|
2682
|
+
|
|
2683
|
+
def calc_wall_coh_xpln(self, **kwargs):
    '''
    Thin wrapper: delegate to the module-level _calc_wall_coh_xpln()
    ([x]-plane routine), passing this instance and all kwargs through unchanged.
    '''
    return _calc_wall_coh_xpln(self, **kwargs)
|
|
2685
|
+
|
|
2686
|
+
def calc_ccor_xpln(self, **kwargs):
    '''
    Thin wrapper: delegate to the module-level _calc_ccor_xpln()
    ([x]-plane routine), passing this instance and all kwargs through unchanged.
    '''
    return _calc_ccor_xpln(self, **kwargs)
|
|
2688
|
+
|
|
2689
|
+
def calc_statistics_xpln(self,**kwargs):
    '''
    Thin wrapper: delegate to the module-level _calc_statistics_xpln()
    ([x]-plane routine), passing this instance and all kwargs through unchanged.
    '''
    return _calc_statistics_xpln(self,**kwargs)
|
|
2691
|
+
|
|
2692
|
+
def calc_turb_budget_xpln(self,**kwargs):
    '''
    Thin wrapper: delegate to the module-level _calc_turb_budget_xpln()
    ([x]-plane routine), passing this instance and all kwargs through unchanged.
    '''
    return _calc_turb_budget_xpln(self,**kwargs)
|