turbx-1.0.2-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
turbx/blasius.py ADDED
@@ -0,0 +1,64 @@
+ import numpy as np
+ import scipy as sp
+
+ # ======================================================================
+
+ def Blasius_solution(eta):
+     '''
+     f·f′′ + 2·f′′′ = 0  ==>  f′′′ = -(1/2)·f·f′′
+     BCs: f(0)=0, f′(0)=0, f′(∞)=1
+     -----
+     for solve_ivp(): d[f′′(η)]/dη = F(f(η), f′(η), f′′(η))
+     y=[f,f′,f′′], y′=[ y[1], y[2], (-1/2)·y[0]·y[2] ]
+     '''
+
+     def Blasius_rhs(t,y):
+         f, fp, fpp = y
+         return np.array([fp, fpp, -0.5*f*fpp])
+
+     if True: ## calculate c0 = f′′(0) by shooting
+
+         def eq_root(c0,eta):
+             sol = sp.integrate.solve_ivp(
+                 fun=Blasius_rhs,
+                 t_span=[0.,eta[-1]],
+                 y0=[0.,0.,float(c0)],
+                 t_eval=eta,
+                 method='RK45',
+                 atol=1e-12,
+                 rtol=1e-12,
+                 )
+             fp = np.copy( sol.y[1] ) ## f′
+             res = 1. - fp[-1] ## residual for BC: f′(∞)=1
+             return res
+
+         eta_test = np.linspace(0,500.,int(1e4))
+         sol = sp.optimize.fsolve(
+             eq_root,
+             x0=0.33205733621490,
+             args=(eta_test,),
+             xtol=1e-12,
+             )
+         c0 = sol[0]
+         #print(f'c0 = {c0:0.14f}')
+
+     else:
+         c0 = 0.33205733621490
+
+     sol = sp.integrate.solve_ivp(
+         fun=Blasius_rhs,
+         t_span=[0.,eta[-1]],
+         y0=[0.,0.,float(c0)],
+         t_eval=eta,
+         method='RK45',
+         atol=1e-12,
+         rtol=1e-12,
+         )
+
+     f, fp, fpp = sol.y ## f′=u/U
+
+     #i_99 = np.abs(fp-0.99).argmin()
+     #eta_99 = eta[i_99]
+     #print(f'η(f′={fp[i_99]:0.14f}) = {eta_99:0.14f}')
+
+     return f, fp, fpp
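The function above returns f, f′ and f′′ on the requested η grid, so a quick self-check is easy to script. A minimal usage sketch (not part of the package contents; the η grid and the 0.99 threshold are illustrative choices, and the import path simply mirrors the file name turbx/blasius.py):

import numpy as np
from turbx.blasius import Blasius_solution

eta = np.linspace(0., 10., 1000)    ## illustrative similarity-coordinate grid
f, fp, fpp = Blasius_solution(eta)  ## fp = f′ = u/U

## wall value f′′(0) should come out ≈0.332, and η_99 (where f′ first reaches ≈0.99) near 5
i_99 = np.abs(fp - 0.99).argmin()
print(f"f''(0) = {fpp[0]:0.6f}, eta_99 = {eta[i_99]:0.3f}")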
turbx/cli.py ADDED
@@ -0,0 +1,19 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ '''
+ ========================================================================
+ Command line interface
+ ========================================================================
+ '''
+
+ import argparse
+
+
+ def main():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("input", help="Help message for input")
+     args = parser.parse_args()
+     print(args.input)
+     ## ... this is a dummy CLI for now
+     ## call from cmd line with e.g. >$ turbx 'hello'
@@ -0,0 +1,243 @@
+ import numpy as np
+ import scipy as sp
+
+ '''
+ ========================================================================
+ Composite profile of Chauhan et al. (2009)
+ https://doi.org/10.1088/0169-5983/41/2/021404
+ ========================================================================
+ '''
+
+ # ======================================================================
+
+ class composite_profile_CMN2009():
+     '''
+     Tool for calculating composite profile described in Chauhan et al. (2009)
+     'Criteria for assessing experiments in zero pressure gradient boundary layers'
+     Chauhan, Monkewitz and Nagib
+     https://doi.org/10.1088/0169-5983/41/2/021404
+     '''
+
+     def __init__(self, Re_tau, k=0.384, Pi=0.55, a=None, C=None, a2=132.8410, a3=-166.2041, a4=71.9114):
+         '''
+         k is the von Kármán constant κ (asymptotic ≈0.384)
+         Reτ is the friction Reynolds number δ+ = δ/δν = δ·uτ/ν
+         C is the log-law (semilog) constant (asymptotic ≈4.17, often 5.0-5.2 in the classic literature)
+         a is a parameter directly related to C, see paper
+         Π is the Coles wake parameter
+         -----
+         a = -10.3061 corresponds to C = 4.17
+         '''
+         self.k = k
+         self.Re_tau = Re_tau
+         self.Pi = Pi
+         self.a2 = a2
+         self.a3 = a3
+         self.a4 = a4
+
+         ## assert that exactly one of a or C was set
+         if (a is None) and (C is None):
+             raise ValueError('set one of a or C')
+         if (a is not None) and (C is not None):
+             raise ValueError('set either a or C, not both')
+
+         if (C is not None): ## find 'a' given C
+
+             self.C = C
+
+             if not isinstance(C,float):
+                 raise ValueError
+
+             def __f_opti(a,C):
+                 y_plus_inf = 1e12
+                 B_asymptotic = (1/k)*np.log((y_plus_inf-a)/-a) + self.__uplus_inner_B(y_plus_inf,a,k) - (1/k)*np.log(y_plus_inf)
+                 root = np.abs(B_asymptotic-C)
+                 return root
+
+             sol = sp.optimize.least_squares(fun=__f_opti,
+                                             args=(C,),
+                                             x0=-10.,
+                                             xtol=1e-15,
+                                             ftol=1e-15,
+                                             gtol=1e-15,
+                                             method='dogbox',
+                                             bounds=(-100.,+100.))
+             if not sol.success:
+                 raise ValueError
+
+             self.a = float(sol.x[0])
+
+         else: ## find C given 'a'
+
+             self.a = a
+
+             def __f_opti(C,a):
+                 y_plus_inf = 1e12
+                 B_asymptotic = (1/k)*np.log((y_plus_inf-a)/-a) + self.__uplus_inner_B(y_plus_inf,a,k) - (1/k)*np.log(y_plus_inf)
+                 root = np.abs(B_asymptotic-C)
+                 return root
+
+             sol = sp.optimize.least_squares(fun=__f_opti,
+                                             args=(a,),
+                                             x0=-10.,
+                                             xtol=1e-15,
+                                             ftol=1e-15,
+                                             gtol=1e-15,
+                                             method='dogbox',
+                                             bounds=(-100.,+100.))
+             if not sol.success:
+                 raise ValueError
+
+             self.C = float(sol.x[0])
+
+     def get_uplus_inner(self,y_plus):
+         '''
+         Calculate inner U+ profile
+         '''
+
+         a = self.a ## related to B for log law
+         k = self.k
+
+         ## this B is a function of y+ for U+_inner i.e. is a vector
+         B = self.__uplus_inner_B(y_plus,a,k)
+
+         ## the 'inner' U+ profile
+         u_plus_inner = np.copy( (1/k)*np.log((y_plus-a)/-a) + B )
+
+         ## log-law constant is [U+] - (1/k)ln(y+) for [y+ -> inf]
+         y_plus_inf = 1e14
+         B_asymptotic = (1/k)*np.log((y_plus_inf-a)/-a) + self.__uplus_inner_B(y_plus_inf,a,k) - (1/k)*np.log(y_plus_inf)
+         self.B = B_asymptotic
+         #print(f'B={B_asymptotic:0.14f}')
+
+         return u_plus_inner
+
+     def __uplus_inner_B(self,y_plus,a,k):
+
+         alpha = (-1/k - a)/2
+         beta = np.sqrt(-2*a*alpha - alpha**2)
+         R = np.sqrt( alpha**2 + beta**2 )
+
+         #T1 = (4*alpha+a) * np.log( -(a/R)*np.sqrt( (y_plus-alpha)**2 + beta**2 ) / ( y_plus - a ) ) \
+         #     + (alpha/beta)*(4*alpha + 5*a)*( np.arctan((y_plus-alpha)/beta) + np.arctan(alpha/beta) )
+         T1 = (4*alpha+a) * np.log( -(a/R)*np.sqrt( (y_plus-alpha)**2 + beta**2 ) / ( y_plus - a ) ) \
+              + (alpha/beta)*(4*alpha + 5*a)*( np.arctan2((y_plus-alpha),beta) + np.arctan2(alpha,beta) )
+
+         T2 = R**2 / (a*(4*alpha-a)) * T1
+
+         return T2
+
+     def f_wake_Chauhan_exp(self,eta,Pi,a2,a3,a4):
+         '''
+         Exponential wake function W from Chauhan 2009
+         '''
+         T1 = 1. - np.exp( -(1/4)*( 5*a2 + 6*a3 + 7*a4 )*eta**4 + a2*eta**5 + a3*eta**6 + a4*eta**7 )
+         T2 = 1. - np.exp( -(a2 + 2*a3 + 3*a4)/4 )
+         T3 = 1. - 1/(2*Pi) * np.log(eta)
+         return (T1/T2) * T3
+
+     def get_wake(self, y_ov_delta, Pi):
+         '''
+         See Chauhan 2009
+         '''
+
+         a2 = self.a2
+         a3 = self.a3
+         a4 = self.a4
+
+         if (y_ov_delta[0]!=0.):
+             raise ValueError('y_ov_delta should start at 0')
+         W = np.zeros_like(y_ov_delta)
+         W[:] = np.nan
+         i_valid = np.where( (y_ov_delta>0.) & (y_ov_delta<=1.) )
+         eta = np.copy(y_ov_delta[i_valid])
+         W[i_valid] = self.f_wake_Chauhan_exp(eta,Pi,a2,a3,a4)
+         W[np.where(y_ov_delta==0.)] = 0.
+         #if np.isnan(W).any():
+         #    raise ValueError
+         return W
+
+     def get_integral_quantities(self, y_plus, u_plus, interp_kind='cubic'):
+         '''
+         δ*=δ1, θ=δ2, Reθ, Reδ1, Reδ2, H
+         '''
+
+         if not isinstance(y_plus, np.ndarray):
+             raise ValueError
+         if not isinstance(u_plus, np.ndarray):
+             raise ValueError
+         if (y_plus.ndim!=1):
+             raise ValueError('y_plus.ndim!=1')
+         if (u_plus.ndim!=1):
+             raise ValueError('u_plus.ndim!=1')
+         if (u_plus.shape != y_plus.shape):
+             raise ValueError
+
+         ## y_max < δ
+         if (y_plus.max() < self.Re_tau):
+             raise ValueError('y+_max of profile cannot be <Reτ --> y_max < δ')
+
+         ## these are simply used locally to dimensionalize
+         nu = 1e-5 ## [m^2/s]
+         d = 37. ## [m]
+
+         u_tau = nu * self.Re_tau / d ## [m/s]
+
+         #sc_l_in = nu / u_tau
+         sc_l_in = d / self.Re_tau
+
+         U_inf_plus = self.get_uplus_inner(y_plus=self.Re_tau) + (2*self.Pi/self.k)
+         U_inf = U_inf_plus * u_tau ## [m/s]
+
+         u = np.copy( u_plus * u_tau ) ## [m/s]
+         y = np.copy( y_plus * sc_l_in ) ## [m]
+
+         integrand_theta = (u/U_inf)*(1-(u/U_inf))
+         integrand_dstar = 1-(u/U_inf)
+
+         #print(y.max())
+         #print(d)
+         #if not np.isclose(y.max(), d, rtol=1e-6):
+         #    raise AssertionError
+
+         theta_ = sp.integrate.cumulative_trapezoid(y=integrand_theta, x=y, initial=0.)
+         theta_func = sp.interpolate.interp1d(y, theta_, kind=interp_kind, bounds_error=False, fill_value='extrapolate')
+         theta = theta_func(d)
+
+         dstar_ = sp.integrate.cumulative_trapezoid(y=integrand_dstar, x=y, initial=0.)
+         dstar_func = sp.interpolate.interp1d(y, dstar_, kind=interp_kind, bounds_error=False, fill_value='extrapolate')
+         dstar = dstar_func(d)
+
+         Re_theta = theta * U_inf / nu
+         Re_dstar = dstar * U_inf / nu
+         H = dstar / theta
+
+         #dRC = U_inf_plus / dstar ## Δ=U_inf+/δ
+
+         return Re_theta, Re_dstar, H
+
+     def calc_uplus_composite(self,y_plus):
+
+         y_ov_delta = np.copy( y_plus / self.Re_tau )
+         i_gt_1 = np.where(y_ov_delta>1.)
+
+         u_plus_inner = self.get_uplus_inner(y_plus)
+         W = self.get_wake(y_ov_delta, Pi=self.Pi)
+         i_nan = np.where(np.isnan(W))
+
+         np.testing.assert_array_equal(i_gt_1,i_nan)
+
+         ## U+_composite = U+_inner + U+_wake
+         u_plus = np.copy( u_plus_inner + (2*self.Pi/self.k)*W )
+
+         ## for y>δ, U+ = U+_inf
+         U_inf_plus = self.get_uplus_inner(y_plus=self.Re_tau) + (2*self.Pi/self.k)
+         u_plus[i_gt_1] = U_inf_plus
+
+         return u_plus
+
+     def __call__(self,y_plus):
+         '''
+         Calling object returns u+(y+)
+         '''
+         return self.calc_uplus_composite(y_plus)
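A short usage sketch for the class above (illustrative, not part of the package contents). The module path of this file is not shown in the hunk header, so the class is assumed to already be in scope; Reτ = 2000 and C = 4.17 are example values (the docstring notes that a = -10.3061 corresponds to C = 4.17):

import numpy as np

Re_tau = 2000.
profile = composite_profile_CMN2009(Re_tau=Re_tau, C=4.17)  ## or pass a=-10.3061 instead of C

## the y+ grid must start at 0 (required by get_wake) and extend past δ (y+ = Reτ)
y_plus = np.linspace(0., 1.2*Re_tau, 2000)
u_plus = profile(y_plus)  ## composite U+(y+); held at U+_inf for y > δ

Re_theta, Re_dstar, H = profile.get_integral_quantities(y_plus, u_plus)
print(f'Re_theta = {Re_theta:0.1f}, H = {H:0.3f}')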
@@ -0,0 +1,64 @@
+ import numpy as np
+ from scipy.stats import norm
+
+ # ======================================================================
+
+ def calc_var_bmbc(u,M,axis=0):
+     '''
+     Estimate N·σ² using "Batch Means and Batch Correlations" (BMBC)
+     see §4 of https://doi.org/10.1016/j.jcp.2017.07.005
+     N = n samples
+     M = size of batch
+     K = n batches
+     - Returns ( N·σ² , S1/S0 ), where S1/S0 is the normalized lag-1 correlation of the batch means
+     -------------------------------------
+     --> !! requires update for ND data !!
+     '''
+     if not isinstance(u,np.ndarray):
+         raise TypeError('input should be numpy array')
+     if (u.ndim!=1):
+         raise NotImplementedError
+     #if (u.dtype!=np.float64):
+     #    u = np.copy(u.astype(np.float64))
+
+     N = u.shape[axis]
+
+     ## assert N is divisible by M
+     if (N%M!=0):
+         raise ValueError('N%M!=0')
+     K = N//M ## n non-overlapping batches in series
+     if (K<3): ## must have >2 batch means
+         raise ValueError('K<3 where K=N/M')
+
+     u_mean = float( np.mean(u,axis=0) ) ## sample mean, μ̂
+
+     ## remove full series mean
+     uI = np.copy( u - u_mean )
+     uI_mean = float( np.mean(uI,axis=0) ) ## should be =0
+     np.testing.assert_allclose(uI_mean, 0., atol=1e-5)
+
+     uI_batched = np.copy(np.reshape(uI,(K,M),order='C'))
+     uI_batched_mean = np.mean( uI_batched , axis=1 ) ## \bar{x}_k Eq.29
+
+     S0 = np.sum( uI_batched_mean**2 ) ## Eq.31
+     S1 = np.sum( uI_batched_mean[:-1] * uI_batched_mean[1:] ) ## Eq.32
+
+     sig2 = (S0 + 2*S1) / ((K-1)*(K-2)) ## Eq.30
+     Nsig2 = sig2 * N
+     S1ovS0 = S1/S0 ## normalized lag-1 correlation of batch means
+     return Nsig2, S1ovS0
+
+ def confidence_interval_unbiased(mean, N_sigma2, N, confidence=0.99):
+     '''
+     Compute the confidence interval for the mean given UNBIASED N·σ²
+     '''
+     if not isinstance(N,(int,np.integer)):
+         raise TypeError('N should be an integer')
+     if (N<1):
+         raise ValueError('N<1')
+     sigma_Xbar = np.sign(N_sigma2) * np.sqrt( np.abs(N_sigma2) / N )
+     alpha = 1. - confidence
+     z = norm.ppf(1. - alpha / 2.) ## percent point function
+     ci_low = mean - z * sigma_Xbar
+     ci_high = mean + z * sigma_Xbar
+     return ci_low, ci_high
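A quick usage sketch for the two functions above (illustrative, not part of the package contents); since the module path is not shown in the hunk header, the functions are assumed to be in scope. The synthetic white-noise signal, the batch size M = 100 and the 99% confidence level are arbitrary example choices:

import numpy as np

rng = np.random.default_rng(0)
u = rng.standard_normal(10000)       ## N = 10000 samples (uncorrelated noise, for illustration)
N = u.shape[0]
M = 100                              ## batch size: N%M==0 and K = N/M = 100 batches

Nsig2, S1ovS0 = calc_var_bmbc(u, M)  ## estimates N·σ² and the lag-1 correlation of batch means
ci_low, ci_high = confidence_interval_unbiased(float(np.mean(u)), Nsig2, N, confidence=0.99)
print(f'mean = {np.mean(u):+0.4f}, 99% CI = [{ci_low:+0.4f}, {ci_high:+0.4f}]')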