turbx 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
turbx/rgd_mean.py ADDED
@@ -0,0 +1,523 @@
+ import os
+ import sys
+ import timeit
+ from pathlib import PurePosixPath
+
+ import numpy as np
+ import psutil
+ from mpi4py import MPI
+ from tqdm import tqdm
+
+ from .h5 import h5_chunk_sizer
+ from .utils import even_print, format_time_string
+
+ # ======================================================================
+
+ def _calc_mean(self, **kwargs):
+     '''
+     Calculate the mean in [t] --> leaves [x,y,z,1]
+     --> saved to a new RGD file
+     -----
+     - uses accumulator buffers and multiplies by (1/n) at the end to get the mean
+     - allows for low RAM usage, since the time dim can be sub-chunked (ct=N)
+     '''
+
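+     ## the accumulator idea in a nutshell (illustrative sketch, not part of the
+     ## original routine): a sum over [t] can be built chunk-by-chunk and
+     ## normalized once at the end, e.g.
+     ##   acc = np.zeros((nx,ny,nz,1))
+     ##   for t1,t2 in chunks: acc += data[...,t1:t2].sum(axis=-1, keepdims=True)
+     ##   mean = acc / nt
+     ## this is what the routine below does, with MPI-decomposed reads.
+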
+     rgd_meta = type(self) ## workaround for using rgd()
+
+     if (self.rank==0):
+         verbose = True
+     else:
+         verbose = False
+
+     if verbose: print('\n'+'rgd.calc_mean()'+'\n'+72*'-')
+     t_start_func = timeit.default_timer()
+
+     rx = kwargs.get('rx',1)
+     ry = kwargs.get('ry',1)
+     rz = kwargs.get('rz',1)
+     rt = kwargs.get('rt',1)
+
+     fn_rgd_mean = kwargs.get('fn_rgd_mean',None)
+     #sfm = kwargs.get('scalars',None) ## scalars to take (for mean)
+     ti_min   = kwargs.get('ti_min',None)
+     favre    = kwargs.get('favre',True)
+     reynolds = kwargs.get('reynolds',True)
+
+     ct = kwargs.get('ct',1) ## number of [t] chunks
+
+     force = kwargs.get('force',False)
+
+     chunk_kb         = kwargs.get('chunk_kb',4*1024) ## h5 chunk size: default 4 [MB]
+     chunk_constraint = kwargs.get('chunk_constraint',(1,None,None,None)) ## the 'constraint' parameter for sizing h5 chunks
+     chunk_base       = kwargs.get('chunk_base',2)
+
+     stripe_count   = kwargs.pop('stripe_count'   , 16 ) ## for initializing mean file
+     stripe_size_mb = kwargs.pop('stripe_size_mb' , 2  )
+
+     if (rt!=1):
+         raise AssertionError('rt!=1')
+     if (rx*ry*rz != self.n_ranks):
+         raise AssertionError('rx*ry*rz != self.n_ranks')
+     if (rx>self.nx):
+         raise AssertionError('rx>self.nx')
+     if (ry>self.ny):
+         raise AssertionError('ry>self.ny')
+     if (rz>self.nz):
+         raise AssertionError('rz>self.nz')
+     if (ti_min is not None):
+         if not isinstance(ti_min, int):
+             raise TypeError('ti_min must be type int')
+
+     if self.usingmpi:
+         comm4d = self.comm.Create_cart(dims=[rx,ry,rz], periods=[False,False,False], reorder=False)
+         t4d = comm4d.Get_coords(self.rank)
+
+         rxl_ = np.array_split(np.arange(self.nx,dtype=np.int64),min(rx,self.nx))
+         ryl_ = np.array_split(np.arange(self.ny,dtype=np.int64),min(ry,self.ny))
+         rzl_ = np.array_split(np.arange(self.nz,dtype=np.int64),min(rz,self.nz))
+         #rtl_ = np.array_split(np.arange(self.nt,dtype=np.int64),min(rt,self.nt))
+
+         rxl = [[b[0],b[-1]+1] for b in rxl_ ]
+         ryl = [[b[0],b[-1]+1] for b in ryl_ ]
+         rzl = [[b[0],b[-1]+1] for b in rzl_ ]
+         #rtl = [[b[0],b[-1]+1] for b in rtl_ ]
+
+         rx1, rx2 = rxl[t4d[0]]; nxr = rx2 - rx1
+         ry1, ry2 = ryl[t4d[1]]; nyr = ry2 - ry1
+         rz1, rz2 = rzl[t4d[2]]; nzr = rz2 - rz1
+         #rt1, rt2 = rtl[t4d[3]]; ntr = rt2 - rt1
+     else:
+         nxr = self.nx
+         nyr = self.ny
+         nzr = self.nz
+         #ntr = self.nt
+
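+     ## decomposition sketch (illustrative, not from the original source): with
+     ## nx=10 and rx=3, np.array_split gives index blocks [0..3],[4..6],[7..9],
+     ## i.e. rxl=[[0,4],[4,7],[7,10]], and each rank reads its own [rx1:rx2) slab.
+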
+     nx = self.nx
+     ny = self.ny
+     nz = self.nz
+     nt = self.nt
+
+     ## mean file name (for writing)
+     if (fn_rgd_mean is None):
+         fname_path = os.path.dirname(self.fname)
+         fname_base = os.path.basename(self.fname)
+         fname_root, fname_ext = os.path.splitext(fname_base)
+         fname_mean_h5_base = fname_root+'_mean.h5'
+         #fn_rgd_mean = os.path.join(fname_path, fname_mean_h5_base)
+         fn_rgd_mean = str(PurePosixPath(fname_path, fname_mean_h5_base))
+         #fn_rgd_mean = Path(fname_path, fname_mean_h5_base)
+
+     if verbose: even_print('fn_rgd'          , self.fname    )
+     if verbose: even_print('fn_rgd_mean'     , fn_rgd_mean   )
+     if verbose: even_print('do Favre avg'    , str(favre)    )
+     if verbose: even_print('do Reynolds avg' , str(reynolds) )
+     if verbose: print(72*'-')
+     if verbose: even_print('nx',f'{self.nx:d}')
+     if verbose: even_print('ny',f'{self.ny:d}')
+     if verbose: even_print('nz',f'{self.nz:d}')
+     if verbose: even_print('nt',f'{self.nt:d}')
+     if verbose: print(72*'-')
+     if verbose: even_print('rx',f'{rx:d}')
+     if verbose: even_print('ry',f'{ry:d}')
+     if verbose: even_print('rz',f'{rz:d}')
+     if verbose: even_print('ct',f'{ct:d}')
+     if verbose: print(72*'-')
+
+     ## get time indices to use for the average
+     if (ti_min is not None):
+         ti_for_avg = np.copy( self.ti[ti_min:] )
+     else:
+         ti_for_avg = np.copy( self.ti )
+
+     nt_avg       = ti_for_avg.shape[0]
+     t_avg_start  = self.t[ti_for_avg[0]]
+     t_avg_end    = self.t[ti_for_avg[-1]]
+     duration_avg = t_avg_end - t_avg_start
+
+     #if not isinstance(ct, (int,np.int32,np.int64)):
+     if not isinstance(ct, int):
+         raise ValueError('ct must be type int')
+     if (ct<1):
+         raise ValueError('ct must be >=1')
+
+     ## [t] sub-chunk ranges
+     ctl_ = np.array_split( ti_for_avg, min(ct,nt_avg) )
+     ctl  = [ [b[0],b[-1]+1] for b in ctl_ ]
+
+     ## check that no sub-range has length <=1
+     for n_ in [ c_[1]-c_[0] for c_ in ctl ]:
+         if (n_ <= 1):
+             raise ValueError('[t] sub-chunk has length <=1')
+
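+     ## e.g. (illustrative, assuming ti runs 0..99): nt_avg=100 with ct=4 gives
+     ## ctl=[[0,25],[25,50],[50,75],[75,100]]; each pass of the main loop below
+     ## reads one such [t] window.
+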
+     ## assert constant Δt; later attach dt as an attribute of the mean file
+     dt0 = np.diff(self.t)[0]
+     if not np.all(np.isclose(np.diff(self.t), dt0, rtol=1e-7)):
+         raise ValueError('Δt is not constant')
+
+     if verbose: even_print('n timesteps avg','%i/%i'%(nt_avg,self.nt))
+     if verbose: even_print('t index avg start','%i'%(ti_for_avg[0],))
+     if verbose: even_print('t index avg end','%i'%(ti_for_avg[-1],))
+     if verbose: even_print('t avg start','%0.2f [-]'%(t_avg_start,))
+     if verbose: even_print('t avg end','%0.2f [-]'%(t_avg_end,))
+     if verbose: even_print('duration avg','%0.2f [-]'%(duration_avg,))
+     if verbose: even_print('Δt','%0.2f [-]'%(dt0,))
+     #if verbose: print(72*'-')
+
+     ## performance counters
+     t_read        = 0.
+     t_write       = 0.
+     data_gb_read  = 0.
+     data_gb_write = 0.
+
+     #scalars_re = ['u','v','w','p','T','rho']
+     scalars_fv = ['u','v','w','T'] ## 'p','rho'
+
+     ## loop through scalars once to collect the mean-dataset field names
+     scalars_mean_names = []
+     #scalars_mean_dtypes = []
+     for scalar in self.scalars:
+
+         ##dtype = self.scalars_dtypes_dict[scalar]
+         #dtype = np.float64 ## always save mean as double
+
+         if reynolds:
+             if True: ## always
+                 sc_name = scalar
+                 scalars_mean_names.append(sc_name)
+                 #scalars_mean_dtypes.append(dtype)
+         if favre:
+             if (scalar in scalars_fv):
+                 sc_name = f'r_{scalar}'
+                 scalars_mean_names.append(sc_name)
+                 #scalars_mean_dtypes.append(dtype)
+
+     #with rgd(fn_rgd_mean, 'w', force=force, driver='mpio', comm=MPI.COMM_WORLD) as hf_mean:
+     with rgd_meta(fn_rgd_mean, 'w', force=force, driver=self.driver, comm=self.comm, stripe_count=stripe_count, stripe_size_mb=stripe_size_mb) as hf_mean:
+
+         ## initialize the mean file from the opened unsteady rgd file
+         hf_mean.init_from_rgd(self.fname)
+
+         ## set some top-level attributes (in MEAN file)
+         hf_mean.attrs['duration_avg'] = duration_avg ## duration of mean
+         #hf_mean.attrs['duration_avg'] = self.duration
+         hf_mean.attrs['dt'] = dt0
+         #hf_mean.attrs['fclass'] = 'rgd'
+         hf_mean.attrs['fsubtype'] = 'mean'
+
+         if verbose: print(72*'-')
+
+         # === initialize datasets in mean file
+         for scalar in self.scalars:
+
+             data_gb_mean = np.dtype(np.float64).itemsize * self.nx*self.ny*self.nz * 1 / 1024**3
+
+             shape  = (1,self.nz,self.ny,self.nx)
+             chunks = h5_chunk_sizer(nxi=shape, constraint=chunk_constraint, size_kb=chunk_kb, base=chunk_base, itemsize=np.dtype(np.float64).itemsize)
+
+             if reynolds:
+
+                 ## do the Re mean of all scalars in the file, regardless of whether they are explicitly in scalars_re or not
+                 #if scalar in scalars_re:
+                 if True:
+
+                     if ('data/%s'%scalar in hf_mean):
+                         del hf_mean['data/%s'%scalar]
+                     if verbose:
+                         even_print( f'initializing data/{scalar}' , f'{data_gb_mean:0.3f} [GB]' )
+                     dset = hf_mean.create_dataset(
+                         f'data/{scalar}',
+                         shape=shape,
+                         dtype=np.float64, ## mean dsets always double
+                         chunks=chunks,
+                         )
+                     hf_mean.scalars.append('data/%s'%scalar)
+
+                     chunk_kb_ = np.prod(dset.chunks)*dset.dtype.itemsize / 1024. ## actual
+                     if verbose:
+                         even_print('chunk shape (t,z,y,x)','%s'%str(dset.chunks))
+                         even_print('chunk size','%i [KB]'%int(round(chunk_kb_)))
+
+             if favre:
+
+                 if (scalar in scalars_fv):
+                     if ('data/%s_fv'%scalar in hf_mean):
+                         del hf_mean['data/%s_fv'%scalar]
+                     if verbose:
+                         even_print( f'initializing data/{scalar}_fv' , f'{data_gb_mean:0.3f} [GB]' )
+                     dset = hf_mean.create_dataset(
+                         f'data/{scalar}_fv',
+                         shape=shape,
+                         dtype=np.float64, ## mean dsets always double
+                         chunks=chunks,
+                         )
+                     hf_mean.scalars.append('data/%s_fv'%scalar)
+
+                     chunk_kb_ = np.prod(dset.chunks)*dset.dtype.itemsize / 1024. ## actual
+                     if verbose:
+                         even_print('chunk shape (t,z,y,x)','%s'%str(dset.chunks))
+                         even_print('chunk size','%i [KB]'%int(round(chunk_kb_)))
+
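+         ## chunking note (a reading of the parameters above, not documented in
+         ## the source): constraint (1,None,None,None) pins the [t] chunk extent
+         ## to 1 and lets h5_chunk_sizer size the (z,y,x) extents toward chunk_kb.
+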
+         if self.usingmpi: self.comm.Barrier()
+         #if verbose: print(72*'-')
+
+         ## accumulator array for local rank --> initialize
+         data_sum = np.zeros(shape=(nxr,nyr,nzr,1), dtype={'names':scalars_mean_names, 'formats':[np.float64 for _ in scalars_mean_names]})
+
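+         ## note (illustrative): 'data_sum' is a structured array, so each named
+         ## field is its own (nxr,nyr,nzr,1) float64 accumulator, e.g.
+         ##   data_sum['u']   += ...   ## Reynolds sum for u
+         ##   data_sum['r_u'] += ...   ## Favre (ρ·u) sum for u
+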
+         # ==========================================================
+         # check memory
+         # ==========================================================
+
+         hostname = MPI.Get_processor_name()
+         mem_free_gb = psutil.virtual_memory().free / 1024**3
+         if self.usingmpi:
+             G = self.comm.gather([ self.rank , hostname , mem_free_gb ], root=0)
+             G = self.comm.bcast(G, root=0)
+         else:
+             G = [[ self.rank , hostname , mem_free_gb ]]
+
+         ## minimum free RAM reported per host
+         host_mem = {}
+         for rank, host, mem in G:
+             if host not in host_mem or mem < host_mem[host]:
+                 host_mem[host] = mem
+         total_free = sum(host_mem.values())
+
+         if verbose:
+             print(72*'-')
+             for key,value in host_mem.items():
+                 even_print(f'RAM free {key}', f'{int(np.floor(value)):d} [GB]')
+             even_print('RAM free (local,min)', f'{int(np.floor(min(host_mem.values()))):d} [GB]')
+             even_print('RAM free (global)', f'{int(np.floor(total_free)):d} [GB]')
+
+         shape_read_local = (nxr,nyr,nzr,nt//ct)
+         data_gb_local = np.dtype(np.float64).itemsize * np.prod(shape_read_local) / 1024**3
+         if verbose: even_print('read shape (local)', f'[{nxr:d},{nyr:d},{nzr:d},{nt//ct:d}]')
+         if verbose: even_print('read size (local)', f'{int(np.ceil(data_gb_local)):d} [GB]')
+
+         shape_read_global = (nx,ny,nz,nt//ct)
+         data_gb_global = np.dtype(np.float64).itemsize * np.prod(shape_read_global) / 1024**3
+         if verbose: even_print('read shape (global)'   , f'[{nx:d},{ny:d},{nz:d},{nt//ct:d}]')
+         if verbose: even_print('read size (global)'    , f'{int(np.ceil(data_gb_global)):d} [GB]')
+         if verbose: even_print('read size (global) ×3' , f'{int(np.ceil(data_gb_global*3)):d} [GB]')
+         ram_usage_est = data_gb_global*3/total_free
+
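+         ## the ×3 factor is read here as a heuristic safety margin (assumption,
+         ## not documented in the source): roughly one buffer for the raw read,
+         ## one for the float64 copy, plus headroom for intermediates like data*rho.
+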
+         if verbose: even_print('RAM usage estimate', f'{100*ram_usage_est:0.1f} [%]')
+         if self.usingmpi: self.comm.Barrier()
+         if (ram_usage_est>0.60):
+             if verbose: print('RAM consumption might be too high. exiting.')
+             if self.usingmpi:
+                 self.comm.Abort(1)
+             else:
+                 sys.exit(1)
+
+         # ==========================================================
+         # main loop
+         # ==========================================================
+
+         if self.usingmpi: self.comm.Barrier()
+         if verbose: print(72*'-')
+
+         if verbose:
+             progress_bar = tqdm(
+                 total=ct,
+                 ncols=100,
+                 desc='mean',
+                 leave=True,
+                 file=sys.stdout,
+                 mininterval=0.1,
+                 smoothing=0.,
+                 #bar_format="\033[B{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}\033[A\n\b",
+                 bar_format="{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}",
+                 ascii="░█",
+                 colour='#FF6600',
+                 )
+
+         ct_counter = 0
+         for ct1, ct2 in ctl:
+             ct_counter += 1
+             ntc = ct2 - ct1
+
+             if (ct>1):
+                 if verbose:
+                     mesg = f'[t] sub chunk {ct_counter:d}/{ct:d}'
+                     tqdm.write( mesg )
+                     tqdm.write( '-'*len(mesg) )
+
+             ## read ρ for Favre averaging
+             if favre:
+
+                 dset = self['data/rho']
+
+                 if self.usingmpi: self.comm.Barrier()
+                 t_start = timeit.default_timer()
+
+                 if self.usingmpi:
+                     with dset.collective:
+                         rho = dset[ct1:ct2,rz1:rz2,ry1:ry2,rx1:rx2].T
+                 else:
+                     rho = dset[ct1:ct2,:,:,:].T
+
+                 if self.usingmpi: self.comm.Barrier()
+                 t_delta = timeit.default_timer() - t_start
+
+                 data_gb = dset.dtype.itemsize * self.nx*self.ny*self.nz * ntc / 1024**3
+                 if verbose:
+                     txt = even_print('read: rho', '%0.2f [GB] %0.2f [s] %0.3f [GB/s]'%(data_gb,t_delta,(data_gb/t_delta)), s=True)
+                     tqdm.write(txt)
+
+                 t_read       += t_delta
+                 data_gb_read += data_gb
+
+                 ## convert to double
+                 rho = rho.astype(np.float64)
+
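+             ## note: datasets are stored as (t,z,y,x) on disk (see the chunk
+             ## shape printed above); the .T flips each read slab to the in-memory
+             ## (x,y,z,t) convention used by the accumulators.
+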
+             ## read data, perform sum Σ
+             for scalar in self.scalars:
+
+                 dset = self[f'data/{scalar}']
+
+                 if self.usingmpi: self.comm.Barrier()
+                 t_start = timeit.default_timer()
+
+                 if self.usingmpi:
+                     with dset.collective:
+                         data = dset[ct1:ct2,rz1:rz2,ry1:ry2,rx1:rx2].T
+                 else:
+                     data = dset[ct1:ct2,:,:,:].T
+
+                 if self.usingmpi: self.comm.Barrier()
+                 t_delta = timeit.default_timer() - t_start
+
+                 data_gb = dset.dtype.itemsize * self.nx*self.ny*self.nz * ntc / 1024**3
+
+                 if verbose:
+                     txt = even_print('read: %s'%scalar, '%0.2f [GB] %0.2f [s] %0.3f [GB/s]'%(data_gb,t_delta,(data_gb/t_delta)), s=True)
+                     tqdm.write(txt)
+
+                 t_read       += t_delta
+                 data_gb_read += data_gb
+
+                 ## convert to double
+                 data = data.astype(np.float64)
+
+                 # === do sum, add to accumulator
+                 if reynolds:
+                     sc_name = scalar
+                     data_sum[sc_name] += np.sum(data, axis=-1, dtype=np.float64, keepdims=True)
+                 if favre:
+                     if (scalar in scalars_fv):
+                         sc_name = f'r_{scalar}'
+                         data_sum[sc_name] += np.sum(data*rho, axis=-1, dtype=np.float64, keepdims=True)
+
+             if self.usingmpi: self.comm.Barrier()
+
+             ## check RAM
+             #mem_avail_gb = psutil.virtual_memory().available/1024**3
+             mem_free_gb = psutil.virtual_memory().free/1024**3
+             if verbose:
+                 tqdm.write(even_print('mem free', '%0.1f [GB]'%mem_free_gb, s=True))
+
+             if verbose: progress_bar.update()
+             if verbose: tqdm.write(72*'-')
+         if verbose: progress_bar.close()
+
+         # ==========================================================
+         # multiply accumulators by (1/n)
+         # ==========================================================
+
+         for scalar in self.scalars:
+             if reynolds:
+                 sc_name = scalar
+                 data_sum[sc_name] *= (1/nt_avg)
+             if favre:
+                 if (scalar in scalars_fv):
+                     sc_name = f'r_{scalar}'
+                     data_sum[sc_name] *= (1/nt_avg)
+
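+         ## sanity note (illustrative): every index in ti_for_avg lands in exactly
+         ## one [t] sub-chunk, so dividing the accumulated sums by nt_avg yields
+         ## the mean over the full averaging window.
+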
+         # ==========================================================
+         # 'data_sum' now contains averages, not sums!
+         # ==========================================================
+
+         ## Favre avg : φ_tilde = avg[ρ·φ]/avg[ρ]
+         if favre:
+             rho_mean = np.copy( data_sum['rho'] )
+
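+         ## note: the Favre branch assumes 'rho' is among the file's scalars and
+         ## that its Reynolds average was accumulated (data_sum['rho'] exists);
+         ## Favre averaging weights each sample by density: φ_tilde = <ρ·φ>/<ρ>.
+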
+         # === write
+         for scalar in self.scalars:
+
+             if reynolds:
+
+                 dset = hf_mean[f'data/{scalar}']
+
+                 data_out = np.copy( data_sum[scalar] )
+
+                 ## if storing as single precision, pre-convert
+                 if (dset.dtype==np.float32):
+                     data_out = np.copy( data_sum[scalar].astype(np.float32) )
+
+                 if self.usingmpi: self.comm.Barrier()
+                 t_start = timeit.default_timer()
+                 if self.usingmpi:
+                     with dset.collective:
+                         dset[:,rz1:rz2,ry1:ry2,rx1:rx2] = data_out.T
+                 else:
+                     dset[:,:,:,:] = data_out.T
+                 if self.usingmpi: self.comm.Barrier()
+                 t_delta = timeit.default_timer() - t_start
+
+                 data_gb_mean = data_out.dtype.itemsize * self.nx*self.ny*self.nz * 1 / 1024**3
+
+                 if verbose:
+                     txt = even_print(f'write: {scalar}', '%0.2f [GB] %0.2f [s] %0.3f [GB/s]'%(data_gb_mean,t_delta,(data_gb_mean/t_delta)), s=True)
+                     tqdm.write(txt)
+
+                 t_write       += t_delta
+                 data_gb_write += data_gb_mean
+
+             if favre:
+                 if (scalar in scalars_fv):
+
+                     dset = hf_mean[f'data/{scalar}_fv']
+
+                     ## φ_tilde = avg[ρ·φ]/avg[ρ]
+                     data_out = np.copy( data_sum[f'r_{scalar}'] / rho_mean )
+
+                     ## if storing as single precision, pre-convert
+                     if (dset.dtype==np.float32):
+                         data_out = np.copy( data_out.astype(np.float32) )
+
+                     if self.usingmpi: self.comm.Barrier()
+                     t_start = timeit.default_timer()
+                     if self.usingmpi:
+                         with dset.collective:
+                             dset[:,rz1:rz2,ry1:ry2,rx1:rx2] = data_out.T
+                     else:
+                         dset[:,:,:,:] = data_out.T
+                     if self.usingmpi: self.comm.Barrier()
+                     t_delta = timeit.default_timer() - t_start
+
+                     data_gb_mean = data_out.dtype.itemsize * self.nx*self.ny*self.nz * 1 / 1024**3
+
+                     if verbose:
+                         txt = even_print(f'write: {scalar}_fv', '%0.2f [GB] %0.2f [s] %0.3f [GB/s]'%(data_gb_mean,t_delta,(data_gb_mean/t_delta)), s=True)
+                         tqdm.write(txt)
+
+                     t_write       += t_delta
+                     data_gb_write += data_gb_mean
+
+         # === replace dims/t array --> take last time of series
+         t = np.array([self.t[-1]],dtype=np.float64)
+         if ('dims/t' in hf_mean):
+             del hf_mean['dims/t']
+         hf_mean.create_dataset('dims/t', data=t)
+
+         if hasattr(hf_mean, 'duration_avg'):
+             if verbose: even_print('duration avg', '%0.2f [-]'%hf_mean.duration_avg)
+
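+         ## the mean file keeps a single [t] entry (the last time of the series)
+         ## so its dims stay consistent with the [x,y,z,1] data shape.
+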
+     if verbose: print(72*'-')
+     if verbose: even_print('time read',format_time_string(t_read))
+     if verbose: even_print('time write',format_time_string(t_write))
+     if verbose: even_print('read total avg', '%0.2f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb_read,t_read,(data_gb_read/t_read)))
+     if verbose: even_print('write total avg', '%0.2f [GB] %0.3f [s] %0.3f [GB/s]'%(data_gb_write,t_write,(data_gb_write/t_write)))
+
+     ## report mean file size
+     if self.usingmpi: self.comm.Barrier()
+     if verbose:
+         print(72*'-')
+         even_print( os.path.basename(fn_rgd_mean), f'{(os.path.getsize(fn_rgd_mean)/1024**3):0.1f} [GB]')
+     if verbose: print(72*'-')
+     if verbose: print('total time : rgd.calc_mean() : %s'%format_time_string((timeit.default_timer() - t_start_func)))
+     if verbose: print(72*'-')
+     return
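+
+ ## usage sketch (illustrative, assuming _calc_mean is bound as rgd.calc_mean and
+ ## that turbx exposes an rgd class which opens with an MPI communicator):
+ #
+ # from mpi4py import MPI
+ # from turbx import rgd
+ # with rgd('unsteady.h5', 'r', driver='mpio', comm=MPI.COMM_WORLD) as f:
+ #     f.calc_mean(rx=2, ry=2, rz=1, ct=4, favre=True, reynolds=True)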