turbx-1.0.2-py3-none-any.whl
- turbx/__init__.py +52 -0
- turbx/bl.py +620 -0
- turbx/blasius.py +64 -0
- turbx/cli.py +19 -0
- turbx/composite_profile.py +243 -0
- turbx/confidence_interval.py +64 -0
- turbx/eas3.py +420 -0
- turbx/eas4.py +567 -0
- turbx/fig_ax_constructor.py +52 -0
- turbx/freestream_parameters.py +268 -0
- turbx/gradient.py +391 -0
- turbx/grid_metric.py +272 -0
- turbx/h5.py +236 -0
- turbx/mvp.py +385 -0
- turbx/rgd.py +2693 -0
- turbx/rgd_mean.py +523 -0
- turbx/rgd_testing.py +354 -0
- turbx/rgd_xpln_ccor.py +701 -0
- turbx/rgd_xpln_coh.py +992 -0
- turbx/rgd_xpln_mean_dim.py +336 -0
- turbx/rgd_xpln_spectrum.py +940 -0
- turbx/rgd_xpln_stats.py +738 -0
- turbx/rgd_xpln_turb_budget.py +1193 -0
- turbx/set_mpl_env.py +85 -0
- turbx/signal.py +277 -0
- turbx/spd.py +1206 -0
- turbx/spd_wall_ccor.py +629 -0
- turbx/spd_wall_ci.py +406 -0
- turbx/spd_wall_import.py +676 -0
- turbx/spd_wall_spectrum.py +638 -0
- turbx/spd_wall_stats.py +618 -0
- turbx/utils.py +84 -0
- turbx/ztmd.py +2224 -0
- turbx/ztmd_analysis.py +2337 -0
- turbx/ztmd_loader.py +56 -0
- turbx-1.0.2.dist-info/LICENSE +21 -0
- turbx-1.0.2.dist-info/METADATA +120 -0
- turbx-1.0.2.dist-info/RECORD +41 -0
- turbx-1.0.2.dist-info/WHEEL +5 -0
- turbx-1.0.2.dist-info/entry_points.txt +2 -0
- turbx-1.0.2.dist-info/top_level.txt +1 -0
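
The wheel ships a single importable top-level package, turbx (per top_level.txt), with the modules listed above as its submodules. A minimal post-install sanity check might look like the sketch below; it assumes the wheel and the dependencies declared in METADATA are installed, and it only probes for a __version__ attribute since the public API is not shown in this diff.

## assumes: pip install turbx==1.0.2 (plus its declared dependencies)
import turbx

## fall back to the install path if no __version__ attribute is exposed
print(getattr(turbx, '__version__', turbx.__file__))
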
turbx/rgd_testing.py
ADDED
@@ -0,0 +1,354 @@
import os
import sys
import timeit

import numpy as np
from tqdm import tqdm

from .h5 import h5_chunk_sizer
from .utils import even_print, format_time_string

# ======================================================================

def _populate_abc_flow(self, **kwargs):
    '''
    Populate (unsteady) ABC flow dummy data
    -----
    https://en.wikipedia.org/wiki/Arnold%E2%80%93Beltrami%E2%80%93Childress_flow
    '''

    if (self.rank==0):
        verbose = True
    else:
        verbose = False

    if verbose: print('\n'+'rgd.populate_abc_flow()'+'\n'+72*'-')
    t_start_func = timeit.default_timer()

    rx = kwargs.get('rx',1)
    ry = kwargs.get('ry',1)
    rz = kwargs.get('rz',1)
    ##
    chunk_kb = kwargs.get('chunk_kb',4*1024) ## 4 [MB]

    self.nx = nx = kwargs.get('nx',100)
    self.ny = ny = kwargs.get('ny',100)
    self.nz = nz = kwargs.get('nz',100)
    self.nt = nt = kwargs.get('nt',100)

    data_gb = 3 * 4*nx*ny*nz*nt / 1024.**3
    if verbose: even_print(self.fname, '%0.2f [GB]'%(data_gb,))

    self.x = x = np.linspace(0., 2*np.pi, nx, dtype=np.float32)
    self.y = y = np.linspace(0., 2*np.pi, ny, dtype=np.float32)
    self.z = z = np.linspace(0., 2*np.pi, nz, dtype=np.float32)
    #self.t = t = np.linspace(0., 10., nt, dtype=np.float32)
    self.t = t = 0.1 * np.arange(nt, dtype=np.float32)

    if (rx*ry*rz != self.n_ranks):
        raise AssertionError('rx*ry*rz != self.n_ranks')

    # ===

    comm4d = self.comm.Create_cart(dims=[rx,ry,rz], periods=[False,False,False], reorder=False)
    t4d = comm4d.Get_coords(self.rank)

    rxl_ = np.array_split(np.arange(self.nx,dtype=np.int64),min(rx,self.nx))
    ryl_ = np.array_split(np.arange(self.ny,dtype=np.int64),min(ry,self.ny))
    rzl_ = np.array_split(np.arange(self.nz,dtype=np.int64),min(rz,self.nz))
    #rtl_ = np.array_split(np.arange(self.nt,dtype=np.int64),min(rt,self.nt))

    rxl = [[b[0],b[-1]+1] for b in rxl_ ]
    ryl = [[b[0],b[-1]+1] for b in ryl_ ]
    rzl = [[b[0],b[-1]+1] for b in rzl_ ]
    #rtl = [[b[0],b[-1]+1] for b in rtl_ ]

    rx1, rx2 = rxl[t4d[0]] #; nxr = rx2 - rx1
    ry1, ry2 = ryl[t4d[1]] #; nyr = ry2 - ry1
    rz1, rz2 = rzl[t4d[2]] #; nzr = rz2 - rz1
    #rt1, rt2 = rtl[t4d[3]]; ntr = rt2 - rt1

    ## per-rank dim range
    xr = x[rx1:rx2]
    yr = y[ry1:ry2]
    zr = z[rz1:rz2]
    #tr = t[rt1:rt2]
    tr = np.copy(t)

    ## write dims
    self.create_dataset('dims/x', data=x)
    self.create_dataset('dims/y', data=y)
    self.create_dataset('dims/z', data=z)
    self.create_dataset('dims/t', data=t)

    shape = (self.nt,self.nz,self.ny,self.nx)
    chunks = h5_chunk_sizer(nxi=shape, constraint=(1,None,None,None), size_kb=chunk_kb, base=4, itemsize=4)

    ## initialize
    data_gb = 4*nx*ny*nz*nt / 1024.**3
    for scalar in ['u','v','w']:
        if ('data/%s'%scalar in self):
            del self['data/%s'%scalar]
        if verbose:
            even_print('initializing data/%s'%(scalar,),'%0.2f [GB]'%(data_gb,))
        dset = self.create_dataset('data/%s'%scalar,
                                   shape=shape,
                                   dtype=np.float32,
                                   chunks=chunks,
                                   )

        chunk_kb_ = np.prod(dset.chunks)*4 / 1024. ## actual
        if verbose:
            even_print('chunk shape (t,z,y,x)','%s'%str(dset.chunks))
            even_print('chunk size','%i [KB]'%int(round(chunk_kb_)))

    if verbose: print(72*'-')

    # === make 4D ABC flow data

    t_start = timeit.default_timer()
    A = np.sqrt(3)
    B = np.sqrt(2)
    C = 1.
    na = np.newaxis
    u = (A + 0.5 * tr[na,na,na,:] * np.sin(np.pi*tr[na,na,na,:])) * np.sin(zr[na,na,:,na]) + \
        B * np.cos(yr[na,:,na,na]) + \
        0.*xr[:,na,na,na]
    v = B * np.sin(xr[:,na,na,na]) + \
        C * np.cos(zr[na,na,:,na]) + \
        0.*yr[na,:,na,na] + \
        0.*tr[na,na,na,:]
    w = C * np.sin(yr[na,:,na,na]) + \
        (A + 0.5 * tr[na,na,na,:] * np.sin(np.pi*tr[na,na,na,:])) * np.cos(xr[:,na,na,na]) + \
        0.*zr[na,na,:,na]

    t_delta = timeit.default_timer() - t_start
    if verbose: even_print('calc flow','%0.3f [s]'%(t_delta,))

    # ===

    data_gb = 4*nx*ny*nz*nt / 1024.**3

    self.comm.Barrier()
    t_start = timeit.default_timer()
    ds = self['data/u']
    with ds.collective:
        ds[:,rz1:rz2,ry1:ry2,rx1:rx2] = u.T
    self.comm.Barrier()
    t_delta = timeit.default_timer() - t_start
    if verbose: even_print('write: u','%0.2f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb,t_delta,(data_gb/t_delta)))

    self.comm.Barrier()
    t_start = timeit.default_timer()
    ds = self['data/v']
    with ds.collective:
        ds[:,rz1:rz2,ry1:ry2,rx1:rx2] = v.T
    self.comm.Barrier()
    t_delta = timeit.default_timer() - t_start
    if verbose: even_print('write: v','%0.2f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb,t_delta,(data_gb/t_delta)))

    self.comm.Barrier()
    t_start = timeit.default_timer()
    ds = self['data/w']
    with ds.collective:
        ds[:,rz1:rz2,ry1:ry2,rx1:rx2] = w.T
    self.comm.Barrier()
    t_delta = timeit.default_timer() - t_start
    if verbose: even_print('write: w','%0.2f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb,t_delta,(data_gb/t_delta)))

    # ===

    if verbose: print('\n'+72*'-')
    if verbose: print('total time : rgd.populate_abc_flow() : %s'%format_time_string((timeit.default_timer() - t_start_func)))
    if verbose: print(72*'-')
    return

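As an illustrative aside (this sketch is not part of rgd_testing.py): the field assembled above is the Arnold-Beltrami-Childress flow with the 'A' coefficient made time-dependent, A(t) = sqrt(3) + 0.5*t*sin(pi*t), B = sqrt(2), C = 1. Below is a minimal serial sketch of the same (x,y,z,t) broadcasting pattern, with arbitrary small grid sizes and without the MPI decomposition or HDF5 writes.

import numpy as np

nx, ny, nz, nt = 16, 16, 16, 8  ## arbitrary small grid
x = np.linspace(0., 2*np.pi, nx, dtype=np.float32)
y = np.linspace(0., 2*np.pi, ny, dtype=np.float32)
z = np.linspace(0., 2*np.pi, nz, dtype=np.float32)
t = 0.1 * np.arange(nt, dtype=np.float32)

A, B, C = np.sqrt(3), np.sqrt(2), 1.
na = np.newaxis
At = A + 0.5 * t[na,na,na,:] * np.sin(np.pi*t[na,na,na,:])  ## time-dependent 'A'

## each component broadcasts from 1-D axes to a full (nx,ny,nz,nt) array
u = At * np.sin(z[na,na,:,na]) + B * np.cos(y[na,:,na,na]) + 0.*x[:,na,na,na]
v = B * np.sin(x[:,na,na,na]) + C * np.cos(z[na,na,:,na]) + 0.*y[na,:,na,na] + 0.*t[na,na,na,:]
w = C * np.sin(y[na,:,na,na]) + At * np.cos(x[:,na,na,na]) + 0.*z[na,na,:,na]

print(u.shape, v.shape, w.shape)  ## (16, 16, 16, 8) for each component
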
def _populate_white_noise(self, **kwargs):
    '''
    Populate white noise dummy data
    --> hardcoded single precision output
    '''

    if (self.rank==0):
        verbose = True
    else:
        verbose = False

    if verbose: print('\n'+'rgd.populate_white_noise()'+'\n'+72*'-')
    t_start_func = timeit.default_timer()

    rx = kwargs.get('rx',1)
    ry = kwargs.get('ry',1)
    rz = kwargs.get('rz',1)
    rt = kwargs.get('rt',1)

    N = kwargs.get('N',1) ## number of timesteps to write at a time

    chunk_kb = kwargs.get('chunk_kb',2*1024)
    chunk_constraint = kwargs.get('chunk_constraint',(1,None,None,None))
    chunk_base = kwargs.get('chunk_base',2)

    self.nx = nx = kwargs.get('nx',128)
    self.ny = ny = kwargs.get('ny',128)
    self.nz = nz = kwargs.get('nz',128)
    self.nt = nt = kwargs.get('nt',128)

    if not isinstance(N, int):
        raise TypeError('N must be type int')
    if (self.nt%N !=0 ):
        raise ValueError(f'{self.nt:d}%{N:d}!=0')

    #data_gb = 3 * 4*nx*ny*nz*nt / 1024.**3
    data_gb = 1 * 4*nx*ny*nz*nt / 1024.**3

    if verbose: even_print(self.fname, '%0.2f [GB]'%(data_gb,))
    if verbose: even_print('nx','%i'%self.nx)
    if verbose: even_print('ny','%i'%self.ny)
    if verbose: even_print('nz','%i'%self.nz)
    if verbose: even_print('nt','%i'%self.nt)
    if verbose: even_print('rx','%i'%rx)
    if verbose: even_print('ry','%i'%ry)
    if verbose: even_print('rz','%i'%rz)
    if verbose: even_print('rt','%i'%rt)
    if verbose: print(72*'-')

    self.x = x = np.linspace(0., 2*np.pi, nx, dtype=np.float32)
    self.y = y = np.linspace(0., 2*np.pi, ny, dtype=np.float32)
    self.z = z = np.linspace(0., 2*np.pi, nz, dtype=np.float32)
    #self.t = t = np.linspace(0., 10., nt, dtype=np.float32)
    self.t = t = 0.1 * np.arange(nt, dtype=np.float32)

    if (rx*ry*rz*rt != self.n_ranks):
        raise AssertionError('rx*ry*rz*rt != self.n_ranks')
    if (rx>self.nx):
        raise AssertionError('rx>self.nx')
    if (ry>self.ny):
        raise AssertionError('ry>self.ny')
    if (rz>self.nz):
        raise AssertionError('rz>self.nz')

    if self.usingmpi:
        comm4d = self.comm.Create_cart(dims=[rx,ry,rz,rt], periods=[False,False,False,False], reorder=False)
        t4d = comm4d.Get_coords(self.rank)

        rxl_ = np.array_split(np.arange(self.nx,dtype=np.int64),min(rx,self.nx))
        ryl_ = np.array_split(np.arange(self.ny,dtype=np.int64),min(ry,self.ny))
        rzl_ = np.array_split(np.arange(self.nz,dtype=np.int64),min(rz,self.nz))
        #rtl_ = np.array_split(np.arange(self.nt,dtype=np.int64),min(rt,self.nt))
        rxl = [[b[0],b[-1]+1] for b in rxl_ ]
        ryl = [[b[0],b[-1]+1] for b in ryl_ ]
        rzl = [[b[0],b[-1]+1] for b in rzl_ ]
        #rtl = [[b[0],b[-1]+1] for b in rtl_ ]

        rx1, rx2 = rxl[t4d[0]] ; nxr = rx2 - rx1
        ry1, ry2 = ryl[t4d[1]] ; nyr = ry2 - ry1
        rz1, rz2 = rzl[t4d[2]] ; nzr = rz2 - rz1
        #rt1, rt2 = rtl[t4d[3]] #; ntr = rt2 - rt1

        ## ## per-rank dim range
        ## xr = x[rx1:rx2]
        ## yr = y[ry1:ry2]
        ## zr = z[rz1:rz2]
        ## tr = t[rt1:rt2]

    else:
        nxr = nx
        nyr = ny
        nzr = nz
        #ntr = nt

    ## write dims (independent)
    self.create_dataset('dims/x', data=x, chunks=None)
    self.create_dataset('dims/y', data=y, chunks=None)
    self.create_dataset('dims/z', data=z, chunks=None)
    self.create_dataset('dims/t', data=t, chunks=None)

    shape = (self.nt,self.nz,self.ny,self.nx)
    float_bytes = 4
    chunks = h5_chunk_sizer(nxi=shape, constraint=chunk_constraint, size_kb=chunk_kb, base=chunk_base, itemsize=float_bytes)

    #self.scalars = ['u','v','w']
    self.scalars = ['u']
    self.scalars_dtypes = [ np.dtype(np.float32) for s in self.scalars ]

    ## initialize datasets
    data_gb = 4*nx*ny*nz*nt / 1024.**3

    if self.usingmpi: self.comm.Barrier()
    t_start_initialize = timeit.default_timer()

    for scalar in self.scalars:

        if ('data/%s'%scalar in self):
            del self['data/%s'%scalar]
        if verbose:
            even_print('initializing data/%s'%(scalar,),'%0.2f [GB]'%(data_gb,))

        if self.usingmpi: self.comm.Barrier()
        t_start = timeit.default_timer()

        dset = self.create_dataset(
            f'data/{scalar}',
            shape=shape,
            dtype=np.float32,
            chunks=chunks,
            )

        if self.usingmpi: self.comm.Barrier()
        t_delta = timeit.default_timer() - t_start

        if verbose: even_print(f'initialize data/{scalar}', f'{data_gb:0.2f} [GB] {t_delta:0.2f} [s] {(data_gb/t_delta):0.3f} [GB/s]')

        chunk_kb_ = np.prod(dset.chunks)*4 / 1024. ## actual
        if verbose:
            even_print('chunk shape (t,z,y,x)','%s'%str(dset.chunks))
            even_print('chunk size','%i [KB]'%int(round(chunk_kb_)))

    if self.usingmpi: self.comm.Barrier()
    t_initialize = timeit.default_timer() - t_start_initialize

    if 1: ## write N ts at a time

        data_gb_write = 0.
        t_write = 0.

        rng = np.random.default_rng(seed=self.rank) ## random number generator
        data = np.zeros(shape=(nxr,nyr,nzr,N), dtype=np.float32)

        if verbose:
            progress_bar = tqdm(total=len(self.scalars)*(nt//N), ncols=100, desc='write', leave=False, file=sys.stdout, smoothing=0.)

        for scalar in self.scalars:
            for ti in range(nt//N):

                cy1 = ti * N
                cy2 = (ti+1) * N

                data[:,:,:,:] = rng.uniform(-1, +1, size=(nxr,nyr,nzr,N)).astype(np.float32)

                ds = self[f'data/{scalar}']
                if self.usingmpi: self.comm.Barrier()
                t_start = timeit.default_timer()
                if self.usingmpi:
                    with ds.collective:
                        ds[cy1:cy2,rz1:rz2,ry1:ry2,rx1:rx2] = data.T
                else:
                    ds[cy1:cy2,:,:,:] = data.T
                if self.usingmpi: self.comm.Barrier()
                t_delta = timeit.default_timer() - t_start
                data_gb = 4*nx*ny*nz*N / 1024**3

                t_write += t_delta
                data_gb_write += data_gb

                if verbose: progress_bar.update()
        if verbose: progress_bar.close()

    if verbose: print(72*'-')
    if verbose: even_print('time initialize',format_time_string(t_initialize))
    if verbose: even_print('write total', '%0.2f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb_write,t_write,(data_gb_write/t_write)))
    if verbose: even_print(self.fname, '%0.2f [GB]'%(os.path.getsize(self.fname)/1024**3))
    if verbose: print(72*'-')
    if verbose: print('total time : rgd.populate_white_noise() : %s'%format_time_string((timeit.default_timer() - t_start_func)))
    if verbose: print(72*'-')
    return
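
As an illustrative aside (not part of the package): both functions above use the same np.array_split pattern to decompose each grid dimension across MPI ranks into contiguous [start, stop) index ranges, which are then used both to slice the local coordinate arrays and to address the collective HDF5 hyperslab writes. A standalone sketch of that pattern, with arbitrary sizes, follows.

import numpy as np

nx, rx = 10, 4  ## e.g. 10 grid points split across 4 ranks
rxl_ = np.array_split(np.arange(nx, dtype=np.int64), min(rx, nx))
rxl  = [ [b[0], b[-1]+1] for b in rxl_ ]  ## per-rank [start,stop): (0,3),(3,6),(6,8),(8,10)

x = np.linspace(0., 2*np.pi, nx, dtype=np.float32)
for rank, (rx1, rx2) in enumerate(rxl):
    xr = x[rx1:rx2]  ## the slice this rank would own
    print(rank, rx1, rx2, xr.shape)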