turbx-1.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- turbx/__init__.py +52 -0
- turbx/bl.py +620 -0
- turbx/blasius.py +64 -0
- turbx/cli.py +19 -0
- turbx/composite_profile.py +243 -0
- turbx/confidence_interval.py +64 -0
- turbx/eas3.py +420 -0
- turbx/eas4.py +567 -0
- turbx/fig_ax_constructor.py +52 -0
- turbx/freestream_parameters.py +268 -0
- turbx/gradient.py +391 -0
- turbx/grid_metric.py +272 -0
- turbx/h5.py +236 -0
- turbx/mvp.py +385 -0
- turbx/rgd.py +2693 -0
- turbx/rgd_mean.py +523 -0
- turbx/rgd_testing.py +354 -0
- turbx/rgd_xpln_ccor.py +701 -0
- turbx/rgd_xpln_coh.py +992 -0
- turbx/rgd_xpln_mean_dim.py +336 -0
- turbx/rgd_xpln_spectrum.py +940 -0
- turbx/rgd_xpln_stats.py +738 -0
- turbx/rgd_xpln_turb_budget.py +1193 -0
- turbx/set_mpl_env.py +85 -0
- turbx/signal.py +277 -0
- turbx/spd.py +1206 -0
- turbx/spd_wall_ccor.py +629 -0
- turbx/spd_wall_ci.py +406 -0
- turbx/spd_wall_import.py +676 -0
- turbx/spd_wall_spectrum.py +638 -0
- turbx/spd_wall_stats.py +618 -0
- turbx/utils.py +84 -0
- turbx/ztmd.py +2224 -0
- turbx/ztmd_analysis.py +2337 -0
- turbx/ztmd_loader.py +56 -0
- turbx-1.0.2.dist-info/LICENSE +21 -0
- turbx-1.0.2.dist-info/METADATA +120 -0
- turbx-1.0.2.dist-info/RECORD +41 -0
- turbx-1.0.2.dist-info/WHEEL +5 -0
- turbx-1.0.2.dist-info/entry_points.txt +2 -0
- turbx-1.0.2.dist-info/top_level.txt +1 -0
turbx/set_mpl_env.py
ADDED
@@ -0,0 +1,85 @@
+import matplotlib as mpl
+
+'''
+========================================================================
+matplotlib environment initializer
+========================================================================
+'''
+
+# ======================================================================
+
+def set_mpl_env(**kwargs):
+    '''
+    Set matplotlib global presets
+    '''
+    fontsize = kwargs.get('fontsize',9)
+    usetex   = kwargs.get('usetex',True)
+    sfac     = kwargs.get('sfac',1) ## global scale factor
+
+    fontsize *= sfac ## scale font size by global scale factor
+    axesAndTickWidth = 0.5*sfac
+
+    mpl.rcParams['text.usetex'] = usetex
+
+    if usetex:
+        preamble_opts = [
+            r'\usepackage{amsmath}',
+            r'\usepackage{amssymb}',
+            r'\usepackage{gensymb}',
+            #r'\usepackage{graphicx}', ## not supported
+            r'\usepackage{newtxtext}', ## Times
+            r'\usepackage{newtxmath}', ## Times Math
+            r'\usepackage{xfrac}',
+            ]
+        mpl.rcParams['text.latex.preamble'] = '\n'.join(preamble_opts)
+
+        mpl.rcParams['font.family'] = 'serif'
+        #mpl.rcParams['font.serif'] = 'Computer Modern Roman'
+        mpl.rcParams['font.serif'] = 'Times'
+
+    else:
+        pass
+
+    mpl.rcParams['xtick.major.size']  = 2.5*sfac
+    mpl.rcParams['xtick.major.width'] = axesAndTickWidth
+    mpl.rcParams['xtick.minor.size']  = 1.5*sfac
+    mpl.rcParams['xtick.minor.width'] = axesAndTickWidth
+    mpl.rcParams['xtick.direction']   = 'in'
+
+    mpl.rcParams['ytick.major.size']  = 2.5*sfac
+    mpl.rcParams['ytick.major.width'] = axesAndTickWidth
+    mpl.rcParams['ytick.minor.size']  = 1.5*sfac
+    mpl.rcParams['ytick.minor.width'] = axesAndTickWidth
+    mpl.rcParams['ytick.direction']   = 'in'
+
+    mpl.rcParams['xtick.labelsize'] = fontsize
+    mpl.rcParams['ytick.labelsize'] = fontsize
+
+    mpl.rcParams['xtick.major.pad'] = 3.0*sfac
+    mpl.rcParams['xtick.minor.pad'] = 3.0*sfac
+    mpl.rcParams['ytick.major.pad'] = 3.0*sfac
+    mpl.rcParams['ytick.minor.pad'] = 3.0*sfac
+
+    mpl.rcParams['lines.linewidth']       = 1.0
+    mpl.rcParams['lines.linestyle']       = 'solid'
+    mpl.rcParams['lines.marker']          = 'None' #'o'
+    mpl.rcParams['lines.markersize']      = 1.2
+    mpl.rcParams['lines.markeredgewidth'] = 0.
+
+    mpl.rcParams['axes.linewidth'] = axesAndTickWidth
+    mpl.rcParams['axes.labelpad']  = 2.0
+    mpl.rcParams['axes.titlesize'] = fontsize
+    mpl.rcParams['axes.labelsize'] = fontsize
+    mpl.rcParams['axes.formatter.use_mathtext'] = True
+
+    mpl.rcParams['legend.fontsize']      = fontsize
+    mpl.rcParams['legend.shadow']        = False
+    mpl.rcParams['legend.borderpad']     = 0.3
+    mpl.rcParams['legend.framealpha']    = 1.0
+    mpl.rcParams['legend.edgecolor']     = 'inherit'
+    mpl.rcParams['legend.handlelength']  = 1.5
+    mpl.rcParams['legend.handletextpad'] = 0.3
+    mpl.rcParams['legend.borderaxespad'] = 0.7
+    mpl.rcParams['legend.columnspacing'] = 0.5
+    mpl.rcParams['legend.fancybox']      = False
+    return
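A minimal usage sketch for these presets (illustrative only; it imports set_mpl_env from its module path and uses usetex=False so no LaTeX toolchain is assumed):

import matplotlib
matplotlib.use('Agg') ## non-interactive backend for this sketch
import matplotlib.pyplot as plt
from turbx.set_mpl_env import set_mpl_env

## apply the global rcParams presets before building any figures
set_mpl_env(fontsize=9, usetex=False, sfac=1.0)

fig, ax = plt.subplots()
ax.plot([0., 1., 2.], [0., 1., 4.], marker='o')
ax.set_xlabel('x')
ax.set_ylabel('y')
fig.savefig('demo.png', dpi=300)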
turbx/signal.py
ADDED
@@ -0,0 +1,277 @@
+import math
+import os
+import sys
+from concurrent.futures import ThreadPoolExecutor
+
+import numpy as np
+import scipy as sp
+import tqdm
+
+# ======================================================================
+
+def get_overlapping_window_size(asz, n_win, overlap_fac):
+    '''
+    get window length and overlap given a
+    desired number of windows and a nominal overlap factor
+    -----
+    --> the output should be passed to get_overlapping_windows()
+        to do the actual padding & windowing
+    '''
+    if not isinstance(asz, (int,np.int32,np.int64)):
+        raise TypeError('arg asz must be type int')
+    if not isinstance(n_win, (int,np.int32,np.int64)):
+        raise TypeError('arg n_win must be type int')
+    if (overlap_fac >= 1.):
+        raise ValueError('arg overlap_fac must be <1')
+    if (overlap_fac < 0.):
+        raise ValueError('arg overlap_fac must be >=0')
+    n_ends = n_win+1
+    n_mids = n_win
+
+    # === solve for float-valued window 'mid' size & 'end' size
+    def eqn(soltup, asz=asz, overlap_fac=overlap_fac):
+        (endsz,midsz) = soltup
+        eq1 = asz - n_ends*endsz - n_mids*midsz
+        eq2 = overlap_fac*(midsz+2*endsz) - endsz
+        return [eq1, eq2]
+
+    guess = asz*0.5
+    endsz,midsz = sp.optimize.fsolve(eqn, (guess,guess), (asz,overlap_fac))
+    win_len = midsz + 2*endsz
+    overlap = endsz
+
+    win_len = max(math.ceil(win_len),1)
+    overlap = max(math.floor(overlap),0)
+
+    return win_len, overlap
+
+def get_overlapping_windows(a, win_len, overlap):
+    '''
+    subdivide 1D array into overlapping windows
+    '''
+    #pad_mode = kwargs.get('pad_mode','append')
+    ##
+    if not isinstance(a, np.ndarray):
+        raise TypeError('arg a must be type np.ndarray')
+    if not isinstance(win_len, int):
+        raise TypeError('arg win_len must be type int')
+    if not isinstance(overlap, int):
+        raise TypeError('arg overlap must be type int')
+    ##
+    asz   = a.size
+    skip  = win_len - overlap
+    n_pad = (win_len - asz%skip)%skip
+    #a_pad = np.concatenate(( np.zeros(n_pad,dtype=a.dtype) , np.copy(a) )) ## prepend
+    a_pad = np.concatenate(( np.copy(a) , np.zeros(n_pad,dtype=a.dtype) )) ## append
+    ##
+    b = np.lib.stride_tricks.sliding_window_view(a_pad, win_len, axis=0)
+    b = np.copy(b[::skip,:])
+    n_win = b.shape[0]
+    ##
+    if (n_pad > 0.5*win_len):
+        print('WARNING: n_pad > 0.5*win_len')
+    ##
+    return b, n_win, n_pad
+
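get_overlapping_window_size() solves a small 2x2 system for an 'end' size (the overlap) and a 'mid' size such that n_win windows of length win_len = midsz + 2*endsz, each overlapped by endsz, tile an input of length asz at roughly the requested overlap fraction; get_overlapping_windows() then zero-pads the array and strides it into a (n_win, win_len) block. A minimal sketch of the pair in use (illustrative values):

import numpy as np
from turbx.signal import get_overlapping_window_size, get_overlapping_windows

a = np.random.default_rng(0).standard_normal(10_000)

## window length & overlap for ~8 windows with a nominal 50% overlap
win_len, overlap = get_overlapping_window_size(a.size, 8, 0.5)

## zero-padded, overlapping segments: b has shape (n_win, win_len)
b, n_win, n_pad = get_overlapping_windows(a, win_len, overlap)
print(b.shape, n_win, n_pad)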
+def ccor(ui,uj,**kwargs):
+    '''
+    1D normalized cross-correlation
+    '''
+    if (ui.ndim!=1):
+        raise AssertionError('ui.ndim!=1')
+    if (uj.ndim!=1):
+        raise AssertionError('uj.ndim!=1')
+
+    mode     = kwargs.get('mode','full')
+    get_lags = kwargs.get('get_lags',False)
+
+    if get_lags:
+        lags = sp.signal.correlation_lags(ui.shape[0], uj.shape[0], mode=mode)
+    else:
+        lags = None
+
+    #R = sp.signal.correlate(ui, uj, mode=mode, method='direct')
+    R = sp.signal.correlate(ui, uj, mode=mode, method='fft') ## 'fft' is O(100)x faster for size O(10000) arrays
+    norm = np.sqrt(np.sum(ui**2)) * np.sqrt(np.sum(uj**2))
+
+    if (norm==0.):
+        #R = np.ones((R.shape[0],), dtype=ui.dtype)
+        R = np.zeros((R.shape[0],), dtype=ui.dtype)
+    else:
+        R /= norm
+
+    if get_lags:
+        return lags, R
+    else:
+        return R
+
+def ccor_naive(u,v,**kwargs):
+    '''
+    1D normalized cross-correlation (naive version)
+    - this kernel is designed as a check for ccor()
+    '''
+    if (u.ndim!=1):
+        raise AssertionError('u.ndim!=1')
+    if (v.ndim!=1):
+        raise AssertionError('v.ndim!=1')
+
+    ii = np.arange(u.shape[0],dtype=np.int32)
+    jj = np.arange(v.shape[0],dtype=np.int32)
+
+    ## lags (2D)
+    ll = np.stack(np.meshgrid(ii,jj,indexing='ij'), axis=-1)
+    ll = ll[:,:,0] - ll[:,:,1]
+
+    ## lags (1D)
+    lmin = ll.min()
+    lmax = ll.max()
+    n_lags = lmax-lmin+1
+    lags = np.arange(lmin,lmax+1)
+
+    uu, vv = np.meshgrid(u,v,indexing='ij')
+    uv  = np.stack((uu,vv), axis=-1)
+    uvp = np.prod(uv,axis=-1)
+
+    c=-1
+    R = np.zeros(n_lags, dtype=np.float64)
+    for lag in lags:
+        c+=1
+        X = np.where(ll==lag)
+        #N = X[0].shape[0]
+        R_ = np.sum(uvp[X]) / ( np.sqrt(np.sum(u**2)) * np.sqrt(np.sum(v**2)) )
+        R[c] = R_
+    return lags, R
+
+def ccor_vec(ui,uj,axis=0):
+    '''
+    normalized cross-correlation, vectorized wrapper
+    ---
+    Parameters:
+    ui, uj : ndarray
+        Input arrays with the same shape.
+    axis : int
+        Axis along which to compute the cross-correlation.
+    Returns:
+    R : ndarray
+        Cross-correlation results, normalized where norms are non-zero.
+    '''
+
+    if ui.shape != uj.shape:
+        raise ValueError('ui and uj must have the same shape.')
+
+    nd = ui.ndim
+    if (nd<2):
+        raise ValueError('nd<2')
+
+    shape_orig = ui.shape
+    axes = tuple(range(ui.ndim))
+
+    if not isinstance(axis, int):
+        raise ValueError('axis should be of type int')
+    if (axis not in axes):
+        raise ValueError(f'axis={axis:d} is not valid for array with ui.ndim={str(ui.ndim)}')
+
+    ## normalization for output array
+    norm = np.sqrt( np.sum( ui**2, axis=axis, keepdims=True ) ) * np.sqrt( np.sum( uj**2, axis=axis, keepdims=True ) )
+
+    nx = ui.shape[axis] ## size of the cross-correlation axis
+    shift_pos = nd-1 ## last axis
+
+    ## shift cross-correlation axis to last axis
+    ui = np.swapaxes(ui, axis, shift_pos)
+    uj = np.swapaxes(uj, axis, shift_pos)
+    shape_new = ui.shape
+    size_all_but_ax = np.prod(np.array(shape_new)[:-1])
+
+    ## shape of output
+    n_lags = 2*nx - 1
+    shape_out = list(shape_orig)
+    shape_out[axis] = n_lags
+    shape_out = tuple(shape_out)
+
+    ## reshape N-D to 2D
+    ## cross-correlation axis is 1, all other axes are flattened on axis=0
+    ui = np.reshape(ui, (size_all_but_ax,nx), order='C')
+    uj = np.reshape(uj, (size_all_but_ax,nx), order='C')
+
+    ## vectorize kernel
+    __ccor_kernel = np.vectorize(
+        lambda u,v: sp.signal.correlate(u,v, mode='full', method='direct'),
+        signature="(n),(n)->(m)",
+        )
+
+    ## run kernel
+    R = __ccor_kernel(ui, uj)
+
+    ## 2D to N-D
+    R = np.reshape(R, (*shape_new[:-1],n_lags), order='C')
+
+    ## shift cross-correlation axis back to original position
+    R = np.swapaxes(R, axis, shift_pos)
+    if (R.shape != shape_out):
+        raise ValueError('R.shape != shape_out')
+
+    ## normalize
+    mask = norm != 0
+    mask = np.broadcast_to(mask, shape_out)
+    norm = np.broadcast_to(norm, shape_out)
+    R[mask] /= norm[mask]
+    R[~mask] = 0.
+    return R
+
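ccor() returns the FFT-based normalized cross-correlation of two 1D signals, ccor_naive() is the O(N^2) reference it is meant to be checked against, and ccor_vec() applies the same normalized correlation along one axis of two equally shaped N-D arrays. A small sketch of how these might be exercised (illustrative; the data are arbitrary random signals):

import numpy as np
from turbx.signal import ccor, ccor_naive, ccor_vec

rng = np.random.default_rng(1)
u = rng.standard_normal(256)
v = rng.standard_normal(256)

lags, R = ccor(u, v, get_lags=True)   ## FFT-based, normalized, lags in ascending order
lags_n, R_n = ccor_naive(u, v)        ## O(N^2) reference
print(np.allclose(R, R_n))            ## should print True (up to FFT round-off)

## vectorized: correlate each row of a (4, 256) array with itself along axis=1
A = rng.standard_normal((4, 256))
R_vec = ccor_vec(A, A, axis=1)
print(R_vec.shape)                    ## (4, 511), i.e. 2*256-1 lags per row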
+def compute_bootstrap_statistic(x, f_statistic, **kwargs):
+    '''
+    Compute bootstrapped confidence intervals for a given statistic (function) using threads
+    '''
+
+    verbose          = kwargs.get('verbose',False)
+    n_resamples      = kwargs.get('n_resamples',10_000)
+    confidence_level = kwargs.get('confidence_level',0.99)
+    max_workers      = kwargs.get('max_workers',None)
+    entropy          = kwargs.get('entropy',None)
+    desc             = kwargs.get('desc','compute_bootstrap_statistic()')
+
+    if not isinstance(x, np.ndarray):
+        raise ValueError('x must be a numpy array')
+    if x.ndim != 1:
+        raise ValueError('x.ndim must be 1')
+
+    n_samples = x.shape[0]
+    sq = np.random.SeedSequence(entropy=entropy)
+    seeds = sq.spawn(n_resamples) ## generate N unique child seeds for local RNGs
+    # for seed_ in seeds: ## check
+    #     print( int(seed_.generate_state(1)[0]) )
+    def __bootstrap_worker(seed):
+        local_rng = np.random.default_rng(seed=seed) ## a local RNG
+        sample = local_rng.choice(x, size=n_samples, replace=True)
+        return f_statistic(sample)
+
+    if max_workers is None:
+        max_workers = os.cpu_count()
+
+    if verbose:
+        progress_bar = tqdm.tqdm(
+            total=n_resamples,
+            ncols=100,
+            desc=desc,
+            smoothing=0.,
+            leave=False,
+            file=sys.stdout,
+            bar_format="{l_bar}{bar}| {n}/{total} [{percentage:.1f}%] {elapsed}/{remaining}\n\033[F\r",
+            ascii="░█",
+            )
+
+    stats = []
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = executor.map( __bootstrap_worker , seeds )
+        for stat in futures:
+            stats.append(stat)
+            if verbose:
+                progress_bar.update()
+    if verbose:
+        progress_bar.close()
+    stats = np.array(stats, dtype=np.float64)
+    alpha = (1 - confidence_level) * 100 / 2
+    ci_low, ci_high = np.percentile( stats , [ alpha , 100-alpha ] )
+    return ci_low, ci_high
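compute_bootstrap_statistic() resamples x with replacement n_resamples times, evaluates f_statistic on each resample in a thread pool, and returns the percentile confidence interval of the resulting statistics; since the workers are plain Python callables, any speedup from the threads depends on the statistic releasing the GIL (NumPy reductions largely do). A minimal sketch (illustrative parameter values):

import numpy as np
from turbx.signal import compute_bootstrap_statistic

rng = np.random.default_rng(42)
x = rng.normal(loc=1.0, scale=0.5, size=2_000)

## 99% percentile-bootstrap confidence interval for the sample mean
ci_low, ci_high = compute_bootstrap_statistic(
    x, np.mean,
    n_resamples=2_000,
    confidence_level=0.99,
    entropy=42,   ## fixes the SeedSequence, making the resamples reproducible
    verbose=False,
)
print(f'{ci_low:0.4f} <= mean(x) <= {ci_high:0.4f}')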