wawi 0.0.1__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wawi/__init__.py +8 -4
- wawi/fe.py +134 -0
- wawi/general.py +468 -0
- wawi/identification.py +66 -0
- wawi/io.py +719 -0
- wawi/modal.py +608 -0
- wawi/plot.py +569 -0
- wawi/prob.py +9 -0
- wawi/random.py +38 -0
- wawi/signal.py +45 -0
- wawi/structural.py +278 -0
- wawi/time_domain.py +126 -0
- wawi/tools.py +7 -0
- wawi/wave.py +491 -0
- wawi/wind.py +1109 -0
- wawi/wind_code.py +14 -0
- {wawi-0.0.1.dist-info → wawi-0.0.5.dist-info}/METADATA +7 -6
- wawi-0.0.5.dist-info/RECORD +21 -0
- wawi-0.0.1.dist-info/RECORD +0 -6
- {wawi-0.0.1.dist-info → wawi-0.0.5.dist-info}/LICENSE +0 -0
- {wawi-0.0.1.dist-info → wawi-0.0.5.dist-info}/WHEEL +0 -0
- {wawi-0.0.1.dist-info → wawi-0.0.5.dist-info}/top_level.txt +0 -0
wawi/wind.py
ADDED
@@ -0,0 +1,1109 @@
import numpy as np
from scipy.interpolate import interp1d
from .modal import statespace, iteig, restructure_as_ref, iteig_naive
from .tools import print_progress as pp
from scipy.special import jv as besselj, yv as bessely
from .general import rodrot, blkdiag
from .plot import plot_ads

conv_text='''
-----------------------------------------------------
|                                                   |
| ~ ~ ~~~ ~ ~~ ~ /^^^^^^^^^^^^\ 88ooo... . . .      |
| ~ ~ ~ ~~ ~ ~ ~\____________/ 88ooo¨¨¨¨ ¨¨         |
|                  CONVERGED!                       |
-----------------------------------------------------
'''

beaufort_dict = {
    'calm': [0, 0.5],
    'light air': [0.5, 1.5],
    'light breeze': [1.6, 3.3],
    'gentle breeze': [3.4, 5.5],
    'moderate breeze': [5.6, 7.9],
    'fresh breeze': [8, 10.7],
    'strong breeze': [10.8, 13.8],
    'moderate gale': [13.9, 17.1],
    'gale': [17.2, 20.7],
    'strong gale': [20.8, 24.4],
    'storm': [24.5, 28.4],
    'violent storm': [28.5, 32.6],
    'hurricane': [32.7, np.inf]
}

def get_beaufort(U0):
    return [key for key in beaufort_dict if inrange(U0, beaufort_dict[key])][0]

def inrange(num, rng):
    return num<=np.max(rng) and num>=np.min(rng)

class LoadCoefficients:
    keys = ['Cd', 'Cm', 'Cl', 'dCd', 'dCm', 'dCl']

    def __repr__(self):
        return 'LoadCoefficients (Cd, Cl, Cm, dCd, dCl, dCm)'

    def __str__(self):
        return f'Cd:{self.Cd}, dCd:{self.dCd}, Cl:{self.Cl}, dCl:{self.dCl}, Cm:{self.Cm}, dCm:{self.dCm}'

    def __init__(self, Cd=None, dCd=None, Cl=None, dCl=None, Cm=None, dCm=None, fill_empty=True):
        self.Cd = Cd
        self.dCd = dCd
        self.Cl = Cl
        self.dCl = dCl
        self.Cm = Cm
        self.dCm = dCm

        if fill_empty:
            self.fill_empty_with_zeros()

    def fill_empty_with_zeros(self):
        for key in self.keys:
            if getattr(self, key) is None:
                setattr(self, key, 0)

    def to_dict(self):
        return {key: getattr(self, key) for key in self.keys}
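A minimal usage sketch of the helpers and the coefficient container above (illustrative only, not part of the packaged file), assuming the module is importable as wawi.wind:

    from wawi.wind import get_beaufort, LoadCoefficients

    print(get_beaufort(9.0))    # 'fresh breeze', since 9.0 falls in [8, 10.7]

    # unspecified coefficients are zero-filled because fill_empty defaults to True
    lc = LoadCoefficients(Cd=0.8, dCd=-0.5, Cl=0.1, dCl=2.0)
    print(lc.to_dict())         # {'Cd': 0.8, 'Cm': 0, 'Cl': 0.1, 'dCd': -0.5, 'dCm': 0, 'dCl': 2.0}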

class ADs:
    ad_keys = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6',
               'H1', 'H2', 'H3', 'H4', 'H5', 'H6',
               'A1', 'A2', 'A3', 'A4', 'A5', 'A6']

    P1, P2, P3, P4, P5, P6 = None, None, None, None, None, None
    H1, H2, H3, H4, H5, H6 = None, None, None, None, None, None
    A1, A2, A3, A4, A5, A6 = None, None, None, None, None, None

    def __init__(self, ad_type='not specified',
                 P1=None, P2=None, P3=None, P4=None, P5=None, P6=None,
                 H1=None, H2=None, H3=None, H4=None, H5=None, H6=None,
                 A1=None, A2=None, A3=None, A4=None, A5=None, A6=None):

        self.type = ad_type

        self.P1 = P1
        self.P2 = P2
        self.P3 = P3
        self.P4 = P4
        self.P5 = P5
        self.P6 = P6

        self.H1 = H1
        self.H2 = H2
        self.H3 = H3
        self.H4 = H4
        self.H5 = H5
        self.H6 = H6

        self.A1 = A1
        self.A2 = A2
        self.A3 = A3
        self.A4 = A4
        self.A5 = A5
        self.A6 = A6

    def plot(self, v=np.arange(0,5,0.01), **kwargs):
        return plot_ads(self.to_dict(), v, **kwargs)

    def to_dict(self):
        return {key: getattr(self, key) for key in self.ad_keys}

    def evaluate_all(self, v):
        AD_evaluated = dict()
        for key in self.ad_keys:
            AD_evaluated[key] = getattr(self, key)(v)

        return AD_evaluated

    def evaluate(self, key, v):
        AD_evaluated = getattr(self, key)(v)

        return AD_evaluated

def flatplate_ads():

    ad_dict = dict()

    def F(v):
        J1 = besselj(1, 0.5/v)
        Y1 = bessely(1, 0.5/v)
        J0 = besselj(0, 0.5/v)
        Y0 = bessely(0, 0.5/v)

        a = J1 + Y0
        b = Y1 - J0
        c = a**2 + b**2

        return (J1*a + Y1*b)/c

    def G(v):
        J1 = besselj(1, 0.5/v)
        Y1 = bessely(1, 0.5/v)
        J0 = besselj(0, 0.5/v)
        Y0 = bessely(0, 0.5/v)

        a = J1 + Y0
        b = Y1 - J0
        c = a**2 + b**2
        return -(J1*J0 + Y1*Y0)/c

    ad_dict['H1'] = lambda v: -2*np.pi*F(v)*v
    ad_dict['H2'] = lambda v: np.pi/2*(1+F(v)+4*G(v)*v)*v
    ad_dict['H3'] = lambda v: 2*np.pi*(F(v)*v-G(v)/4)*v
    ad_dict['H4'] = lambda v: np.pi/2*(1+4*G(v)*v)
    ad_dict['H5'] = lambda v: 0*v
    ad_dict['H6'] = lambda v: 0*v

    ad_dict['A1'] = lambda v: -np.pi/2*F(v)*v
    ad_dict['A2'] = lambda v: -np.pi/8*(1-F(v)-4*G(v)*v)*v
    ad_dict['A3'] = lambda v: np.pi/2*(F(v)*v-G(v)/4)*v
    ad_dict['A4'] = lambda v: np.pi/2*G(v)*v
    ad_dict['A5'] = lambda v: 0*v
    ad_dict['A6'] = lambda v: 0*v

    ad_dict['P1'] = lambda v: 0*v
    ad_dict['P2'] = lambda v: 0*v
    ad_dict['P3'] = lambda v: 0*v
    ad_dict['P4'] = lambda v: 0*v
    ad_dict['P4'] = lambda v: 0*v
    ad_dict['P5'] = lambda v: 0*v
    ad_dict['P6'] = lambda v: 0*v

    return ad_dict


def quasisteady_ads(D, B, load_coefficients):
    # Assuming load coeffs are normalized wrt. both D (Cd) and B (Cl and Cm) and ADs are
    # normalized using B only.

    if type(load_coefficients)==dict:
        Cd = load_coefficients['Cd']
        dCd = load_coefficients['dCd']
        Cl = load_coefficients['Cl']
        dCl = load_coefficients['dCl']
        Cm = load_coefficients['Cm']
        dCm = load_coefficients['dCm']
    else:
        Cd, dCd = load_coefficients.Cd, load_coefficients.dCd
        Cl, dCl = load_coefficients.Cl, load_coefficients.dCl
        Cm, dCm = load_coefficients.Cm, load_coefficients.dCm

    ad_dict = dict()
    ad_dict['P1'], ad_dict['P2'], ad_dict['P3'] = lambda v: -2*Cd*D/B*v, lambda v: 0*v, lambda v: dCd*D/B*v**2
    ad_dict['P4'], ad_dict['P5'], ad_dict['P6'] = lambda v: 0*v, lambda v: (Cl-dCd*D/B)*v, lambda v: 0*v

    ad_dict['H1'], ad_dict['H2'], ad_dict['H3'] = lambda v: -(dCl+Cd*D/B)*v, lambda v: 0*v, lambda v: dCl*v**2
    ad_dict['H4'], ad_dict['H5'], ad_dict['H6'] = lambda v: 0*v, lambda v: -2*Cl*v, lambda v: 0*v

    ad_dict['A1'], ad_dict['A2'], ad_dict['A3'] = lambda v: -dCm*v, lambda v: 0*v, lambda v: dCm*v**2
    ad_dict['A4'], ad_dict['A5'], ad_dict['A6'] = lambda v: 0*v, lambda v: -2*Cm*v, lambda v: 0*v

    return ad_dict
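A minimal sketch of how the two generators above combine with the ADs container (illustrative only, not part of the packaged file); the static coefficients are made up:

    import numpy as np
    from wawi.wind import ADs, LoadCoefficients, flatplate_ads, quasisteady_ads

    # wrap the theoretical flat-plate derivatives in an ADs object
    ad = ADs(ad_type='flat plate', **flatplate_ads())
    v = np.array([0.5, 1.0, 2.0])           # reduced velocities V/(B*omega)
    H1 = ad.evaluate('H1', v)               # one derivative
    all_ads = ad.evaluate_all(v)            # dict with all 18 derivatives

    # quasi-steady derivatives from (made-up) static load coefficients
    lc = LoadCoefficients(Cd=0.7, dCd=-0.8, Cl=0.15, dCl=2.5, Cm=0.02, dCm=1.1)
    qs = quasisteady_ads(D=3.0, B=20.0, load_coefficients=lc)
    print(qs['H3'](2.0))                    # dCl*v**2 = 10.0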

def compute_aero_matrices(U, AD, B, elements, T_wind, phi,
                          omega_reduced=None, print_progress=False, rho=1.225):

    if omega_reduced is None:
        omega_reduced = np.linspace(0.015, 2.0, 75)

    n_modes = phi.shape[1]

    Kae = np.zeros([n_modes, n_modes, len(omega_reduced)])
    Cae = np.zeros([n_modes, n_modes, len(omega_reduced)])

    for element_ix, element in enumerate(elements):

        if callable(U):
            U_el_glob = U(element.get_cog())
        else:
            U_el_glob = U*1

        U_el = normal_wind(T_wind, element.T0, U=U_el_glob)

        v = U_el/(B*omega_reduced)

        for k, v_k in enumerate(v):
            k_aero, c_aero = element_aero_mats(B, omega_reduced[k],
                                               AD.evaluate_all(v_k),
                                               element.L, T=element.T0,
                                               phi=phi[element.global_dofs, :], rho=rho)

            Kae[:, :, k] = Kae[:, :, k] + k_aero
            Cae[:, :, k] = Cae[:, :, k] + c_aero

        if print_progress:
            pp(element_ix+1, len(elements), sym='=', postfix=' ESTABLISHING WIND EXCITATION')
            print('')

    Cae = interp1d(omega_reduced, Cae, kind='quadratic', fill_value='extrapolate', bounds_error=False)
    Kae = interp1d(omega_reduced, Kae, kind='quadratic', fill_value='extrapolate', bounds_error=False)

    return Kae, Cae

def compute_aero_matrices_sets(U, AD, B, elements, T_wind, phi_dict,
                               omega_reduced=None, omega=None, print_progress=False, sets=None):

    if sets is None:
        sets = elements.keys()

    if omega is None:
        return_as_function = True
    else:
        first_is_zero = omega[0]==0.0
        if first_is_zero:
            omega = omega[1:]

    if omega_reduced is None:
        omega_reduced = np.logspace(np.log10(0.01), np.log10(2), 100)   #standard values should be reasonable in most typical cases - change later!

    first_key = [str(key) for key in sets][0]
    n_modes = np.shape(phi_dict[first_key])[1]

    Kae = np.zeros([n_modes, n_modes, len(omega_reduced)])
    Cae = np.zeros([n_modes, n_modes, len(omega_reduced)])

    for set_name in sets:
        B_set = B[set_name]
        AD_set = AD[set_name]
        phi = phi_dict[set_name]
        elements_set = elements[set_name]

        for element_ix, element in enumerate(elements_set):
            T_el = element.T0
            U_el = normal_wind(T_wind, T_el, U=U)
            v = U_el/(B_set*omega_reduced)

            dof_range = np.hstack([element.nodes[0].global_dofs, element.nodes[1].global_dofs])

            for k, v_k in enumerate(v):
                k_aero, c_aero = element_aero_mats(B_set, omega_reduced[k], AD_set.evaluate_all(v_k), element.L, T=T_el, phi=phi[dof_range, :])
                Kae[:, :, k] += k_aero
                Cae[:, :, k] += c_aero

            if print_progress:
                pp(element_ix+1, len(elements_set), sym='>', postfix=f' finished with set "{set_name}".')

        if print_progress:
            print('')

    Cae = interp1d(omega_reduced, Cae, kind='quadratic', fill_value='extrapolate')
    Kae = interp1d(omega_reduced, Kae, kind='quadratic', fill_value='extrapolate')

    if return_as_function:
        return Kae, Cae
    else:
        Cae = Cae(omega)
        Kae = Kae(omega)

        if first_is_zero:
            Cae = np.insert(Cae, 0, Cae[:,:,0]*0, axis=2)
            Kae = np.insert(Kae, 0, Kae[:,:,0]*0, axis=2)

        return Kae, Cae

def mvregress_ads(beta):
    ad_dict = dict()
    ad_keys = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6',
               'H1', 'H2', 'H3', 'H4', 'H5', 'H6',
               'A1', 'A2', 'A3', 'A4', 'A5', 'A6']

    for key in ad_keys:
        ad_dict[key] = lambda v, key=key: 0

    #TODO: FINALIZE, NOT FINISHED

    return ad_dict


def f_rf_fun_legacy(a, d, v):
    N = len(a)
    f = 0j
    for l in range(0, 3):
        f = f + a[l] * (1j/v)**l

    for l in range(0, N-3):
        f = f + a[l+2]*(1j/v) / ((1j/v + d[l]))

    f = f*v**2
    return f


def f_rf_fun(a, d, v):
    N = len(a)
    f = np.array(a[0])*0j

    for l in range(0, 3):
        f = f + a[l] * (1j/v)**l

    for l in range(0, N-3):
        f = f + a[l+2]*(1j/v) / ((1j/v + d[l]))

    f = f*v**2

    return f


def rf_ads(a, d):
    # B assumed to be implicitly included in RF factors
    ad_dict = dict()
    ad_keys = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6',
               'H1', 'H2', 'H3', 'H4', 'H5', 'H6',
               'A1', 'A2', 'A3', 'A4', 'A5', 'A6']

    imag_component_ad = ['P1', 'P2', 'P5', 'H1', 'H2', 'H5', 'A1', 'A2', 'A5']

    position_dict = {'P1': [0,0], 'P2': [0,2], 'P3': [0,2], 'P4': [0,0], 'P5': [0,1], 'P6': [0,1],
                     'H1': [1,1], 'H2': [1,2], 'H3': [1,2], 'H4': [1,1], 'H5': [1,0], 'H6': [1,0],
                     'A1': [2,1], 'A2': [2,2], 'A3': [2,2], 'A4': [2,1], 'A5': [2,0], 'A6': [2,0]}

    for key in ad_keys:
        row = position_dict[key][0]
        col = position_dict[key][1]
        a_key = [ai[row, col] for ai in a]

        if key in imag_component_ad:
            ad_dict[key] = lambda v, a=a_key: np.imag(f_rf_fun_legacy(a, d, v))
        else:
            ad_dict[key] = lambda v, a=a_key: np.real(f_rf_fun_legacy(a, d, v))

    return ad_dict


def distribute_to_dict(prefix, array, count_start=1):
    array_dict = dict()
    for ix, array_i in enumerate(array):
        key = prefix + str(ix+count_start)
        array_dict[key] = array_i

    return array_dict


def distribute_multi_to_dict(prefixes, arrays):
    array_dict = dict()

    for prefix_ix, prefix in enumerate(prefixes):
        for ix, array_i in enumerate(arrays[prefix_ix]):
            key = prefix + str(ix+1)
            array_dict[key] = array_i

    return array_dict


def unwrap_rf_parameters(parameters):
    keys = list(parameters.keys())
    a_ixs = np.where([word.startswith('a') for word in keys])[0]
    d_ixs = np.where([word.startswith('d') for word in keys])[0]
    a_nums = np.array([int(string.split('a')[1]) for string in np.array(keys)[a_ixs]])
    d_nums = np.array([int(string.split('d')[1]) for string in np.array(keys)[d_ixs]])

    a = [np.zeros([3,3])]*(max(a_nums))
    d = [0]*(max(d_nums))

    for a_num in a_nums:
        a[a_num-1] = np.array(parameters['a%i' %a_num])

    for d_num in d_nums:
        d[d_num-1] = parameters['d%i' %d_num]

    d = np.array(d)
    return a,d
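A minimal sketch of the rational-function route above (illustrative only, not part of the packaged file); the fitted parameters are made up, shaped as the code expects (3x3 'a' matrices and scalar 'd' terms):

    import numpy as np
    from wawi.wind import unwrap_rf_parameters, rf_ads

    pars = {'a1': np.eye(3).tolist(),
            'a2': (0.1*np.ones((3, 3))).tolist(),
            'a3': np.zeros((3, 3)).tolist(),
            'd1': 0.5}

    a, d = unwrap_rf_parameters(pars)       # list of 3x3 arrays and array of d-terms
    ad_dict = rf_ads(a, d)                  # callable AD dictionary (P1...A6)
    print(ad_dict['H1'](2.0))               # evaluated at reduced velocity 2.0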

def normal_wind(T_g2wi, T_g2el, U=1.0):
    T_wi2el = T_g2el @ T_g2wi.T
    e_wind_local = (T_wi2el @ np.array([1, 0, 0])[np.newaxis,:].T).flatten()

    Un = U * np.sqrt(e_wind_local[1]**2+e_wind_local[2]**2)
    return Un
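A minimal sketch of the helper above (illustrative only, not part of the packaged file): for an element whose local x-axis is yawed 30 degrees away from the wind x-axis, only the component normal to the element axis is kept.

    import numpy as np
    from wawi.wind import normal_wind

    theta = np.radians(30)
    T_g2el = np.array([[np.cos(theta), np.sin(theta), 0],
                       [-np.sin(theta), np.cos(theta), 0],
                       [0, 0, 1]])          # global-to-element transformation
    T_g2wi = np.eye(3)                      # wind system aligned with global axes

    print(normal_wind(T_g2wi, T_g2el, U=30.0))    # 30*sin(30 deg) = 15.0 m/s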

def el_mat_generic(Ayy,Ayz,Ayt,Azy,Azz,Azt,Aty,Atz,Att,L):
    mat = np.zeros([12,12])

    mat[0:6, 0:6] = np.array([
        [0, 0, 0, 0, 0, 0 ],
        [0, 156*Ayy, 156*Ayz, 147*Ayt, -22*L*Ayz, 22*L*Ayy ],
        [0, 156*Azy, 156*Azz, 147*Azt, -22*L*Azz, 22*L*Azy ],
        [0, 147*Aty, 147*Atz, 140*Att, -21*L*Atz, 21*L*Aty ],
        [0, -22*L*Azy, -22*L*Azz, -21*L*Azt, 4*L**2*Azz, -4*L**2*Azy ],
        [0, 22*L*Ayy, 22*L*Ayz, 21*L*Ayt, -4*L**2*Ayz, 4*L**2*Ayy ],
    ])

    mat[0:6, 6:12] = np.array([
        [0, 0, 0, 0, 0, 0 ],
        [0, 54*Ayy, 54*Ayz, 63*Ayt, 13*L*Ayz, -13*L*Ayy ],
        [0, 54*Azy, 54*Azz, 63*Azt, 13*L*Azz, -13*L*Azy ],
        [0, 63*Aty, 63*Atz, 70*Att, 14*L*Atz, -14*L*Aty ],
        [0, -13*L*Azy, -13*L*Azz, -14*L*Azt, -3*L**2*Azz, 3*L**2*Azy ],
        [0, 13*L*Ayy, 13*L*Ayz, 14*L*Ayt, 3*L**2*Ayz, -3*L**2*Ayy ],
    ])

    mat[6:12, 0:6] = np.array([
        [0, 0, 0, 0, 0, 0 ],
        [0, 54*Ayy, 54*Ayz, 63*Ayt, -13*L*Ayz, 13*L*Ayy ],
        [0, 54*Azy, 54*Azz, 63*Azt, -13*L*Azz, 13*L*Azy ],
        [0, 63*Aty, 63*Atz, 70*Att, -14*L*Atz, 14*L*Aty ],
        [0, 13*L*Azy, 13*L*Azz, 14*L*Azt, -3*L**2*Azz, 3*L**2*Azy ],
        [0, -13*L*Ayy, -13*L*Ayz, -14*L*Ayt, 3*L**2*Ayz, -3*L**2*Ayy ],
    ])

    mat[6:12, 6:12] = np.array([
        [0, 0, 0, 0, 0, 0 ],
        [0, 156*Ayy, 156*Ayz, 147*Ayt, 22*L*Ayz, -22*L*Ayy ],
        [0, 156*Azy, 156*Azz, 147*Azt, 22*L*Azz, -22*L*Azy ],
        [0, 147*Aty, 147*Atz, 140*Att, 21*L*Atz, -21*L*Aty ],
        [0, 22*L*Azy, 22*L*Azz, 21*L*Azt, 4*L**2*Azz, -4*L**2*Azy ],
        [0, -22*L*Ayy, -22*L*Ayz, -21*L*Ayt, -4*L**2*Ayz, 4*L**2*Ayy ],
    ])

    return mat

def element_aero_mats(B, omega, ad_dict, L, T=None, phi=None, rho=1.225):
    # Called for selected reduced velocity, specified by omega value (implicitly mean wind).
    # Corresponding values of P, H and A are used for given mean wind velocity.

    # Stiffness
    Ayy = 1/2*rho*B**2*omega**2*ad_dict['P4']
    Ayz = 1/2*rho*B**2*omega**2*ad_dict['P6']
    Ayt = -1/2*rho*B**2*omega**2*B*ad_dict['P3']

    Azy = 1/2*rho*B**2*omega**2*ad_dict['H6']
    Azz = 1/2*rho*B**2*omega**2*ad_dict['H4']
    Azt = -1/2*rho*B**2*omega**2*B*ad_dict['H3']

    Aty = -1/2*rho*B**2*omega**2*B*ad_dict['A6']
    Atz = -1/2*rho*B**2*omega**2*B*ad_dict['A4']
    Att = 1/2*rho*B**2*omega**2*B**2*ad_dict['A3']

    k_aero = L/420 * el_mat_generic(Ayy,Ayz,Ayt,Azy,Azz,Azt,Aty,Atz,Att,L)

    # Damping
    Ayy = 1/2*rho*B**2*omega*ad_dict['P1']
    Ayz = 1/2*rho*B**2*omega*ad_dict['P5']
    Ayt = -1/2*rho*B**2*omega*B*ad_dict['P2']

    Azy = 1/2*rho*B**2*omega*ad_dict['H5']
    Azz = 1/2*rho*B**2*omega*ad_dict['H1']
    Azt = -1/2*rho*B**2*omega*B*ad_dict['H2']

    Aty = -1/2*rho*B**2*omega*B*ad_dict['A5']
    Atz = -1/2*rho*B**2*omega*B*ad_dict['A1']
    Att = 1/2*rho*B**2*omega*B**2*ad_dict['A2']

    c_aero = L/420 * el_mat_generic(Ayy,Ayz,Ayt,Azy,Azz,Azt,Aty,Atz,Att,L)

    if (T is None and phi is None)!=True:
        if T is not None:   #if no transformation matrix is given, a local matrix is output
            if np.shape(T)[0]==6:
                T = np.kron(np.eye(2), T)   #two times 6dof matrix, block diagonal
            if np.shape(T)[0]==3:
                T = np.kron(np.eye(4), T)   #four times 3dof matrix, block diagonal
            elif np.shape(T)[0]!=12:
                raise ValueError('Wrong size of T (should be 3x3, 6x6 or 12x12')
        else:
            T = np.eye(12)

        if phi is not None:
            T = T @ phi

        k_aero = T.T @ k_aero @ T
        c_aero = T.T @ c_aero @ T

    return k_aero, c_aero

# Spectra
def kaimal_auto(omega, Lx, A, sigma, V):
    f = omega/(2*np.pi)
    fhat = f*Lx/V
    S = (sigma**2*(A*fhat)/(1+(1.5*A*fhat))**(5/3))/f

    return S/(2*np.pi)

def von_Karman_auto(omega, Lx, sigma, V):

    A1 = [
        0.0,
        0.0,
        755.2,
    ]

    A2 = [
        70.8,
        0.0,
        283.2,
    ]

    rr = [
        5/6,
        11/6,
        11/6,
    ]

    f = omega/(2*np.pi)
    fhat = f*Lx/V
    S = (sigma**2*( (4*fhat)*(1+A1*fhat**2) )/ (1+A2*fhat**2)**(rr))/f

    return S/(2*np.pi)
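A minimal sketch of the auto-spectrum above (illustrative only, not part of the packaged file); the turbulence parameters are made-up per-component values, shaped (3,1) so that one call returns the u, v and w spectra:

    import numpy as np
    from wawi.wind import kaimal_auto

    omega = np.linspace(0.01, 3.0, 100)             # angular frequency axis [rad/s]
    Lx = np.array([[162.0], [81.0], [40.0]])        # integral length scales (made up)
    A = np.array([[6.8], [9.4], [9.4]])             # spectral constants (made up)
    sigma = np.array([[1.8], [1.4], [1.0]])         # turbulence standard deviations (made up)

    Su, Sv, Sw = kaimal_auto(omega, Lx, A, sigma, V=30.0)   # one-point spectra per rad/s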

def generic_kaimal_matrix(omega, nodes, T_wind, A, sigma, C, Lx, U, options=None):
    # Adopted from MATLAB version. `nodes` is list with beef-nodes.
    V = np.zeros(len(nodes))                    # Initialize vector with mean wind in all nodes
    Su = np.zeros([len(nodes), len(nodes)])     # One-point spectra for u component in all nodes
    Sv = np.zeros([len(nodes), len(nodes)])     # One-point spectra for v component in all nodes
    Sw = np.zeros([len(nodes), len(nodes)])     # One-point spectra for w component in all nodes
    xyz = np.zeros([len(nodes), 3])             # Nodes in wind coordinate system

    if options is None:
        options = {
            'spectra_type': 'Kaimal'
        }

    for node_ix, node in enumerate(nodes):
        xyz[node_ix,:] = (T_wind @ node.coordinates).T      # Transform node coordinates to the wind coordinate system
        V[node_ix] = U(node.coordinates)                    # Mean wind velocity in the nodes

        if 'spectra_type' in options:
            if options['spectra_type'] == 'vonKarman':
                Su[node_ix,:], Sv[node_ix,:], Sw[node_ix,:] = von_Karman_auto(omega, Lx, sigma, V[node_ix])
            elif options['spectra_type'] == 'Kaimal':
                Su[node_ix,:], Sv[node_ix,:], Sw[node_ix,:] = kaimal_auto(omega, Lx, A, sigma, V[node_ix])  # One point spectra for u component in all nodes
        else:   # use Kaimal (default)
            Su[node_ix,:], Sv[node_ix,:], Sw[node_ix,:] = kaimal_auto(omega, Lx, A, sigma, V[node_ix])

    x = xyz[:, 0]
    y = xyz[:, 1]
    z = xyz[:, 2]

    dxdx = x[np.newaxis,:] - x[np.newaxis,:].T      # Matrix with all distances between nodes in x direction
    dydy = y[np.newaxis,:] - y[np.newaxis,:].T      # Matrix with all distances between nodes in y direction
    dzdz = z[np.newaxis,:] - z[np.newaxis,:].T      # Matrix with all distances between nodes in z direction

    invV = 2/(V[np.newaxis,:]+V[np.newaxis,:].T)    # Inverse mean wind velocity for all combinations of nodes

    Suu = np.sqrt(Su)*np.sqrt(Su).T*np.exp(
        -invV*omega/(2*np.pi)*np.sqrt(
            (C[0,0]*dxdx)**2 + (C[1,0]*dydy)**2 + (C[2,0]*dzdz)**2)
        )

    Svv = np.sqrt(Sv)*np.sqrt(Sv).T*np.exp(
        -invV*omega/(2*np.pi)*np.sqrt(
            (C[0,1]*dxdx)**2 + (C[1,1]*dydy)**2 + (C[2,1]*dzdz)**2)
        )

    Sww = np.sqrt(Sw)*np.sqrt(Sw).T*np.exp(
        -invV*omega/(2*np.pi)*np.sqrt(
            (C[0,2]*dxdx)**2 + (C[1,2]*dydy)**2 + (C[2,2]*dzdz)**2)
        )

    SvSv = np.zeros([3*len(nodes), 3*len(nodes)])   # Cross-spectral density matrix containing all the turbulence components
    SvSv[0::3, 0::3] = Suu
    SvSv[1::3, 1::3] = Svv
    SvSv[2::3, 2::3] = Sww

    return SvSv

def loadmatrix_fe(V, load_coefficients, rho, B, D, admittance=None):

    if admittance is None:
        admittance = lambda omega_k: np.ones( (4,3) )

    Cd = load_coefficients['Cd']
    dCd = load_coefficients['dCd']
    Cl = load_coefficients['Cl']
    dCl = load_coefficients['dCl']
    Cm = load_coefficients['Cm']
    dCm = load_coefficients['dCm']

    # Equation 7 from Oiseth, 2010
    BqBq = lambda omega_k: 1/2*rho*V*B*admittance(omega_k*B/V/2/np.pi)*np.array([[0, 0, 0],
                                                                                 [0, 2*D/B*Cd, (D/B*dCd-Cl)],
                                                                                 [0, 2*Cl, (dCl+D/B*Cd)],
                                                                                 [0, -2*B*Cm, -B*dCm]])

    return BqBq

def loadmatrix_fe_static(V, load_coefficients, rho, B, D):

    Cd = load_coefficients['Cd']
    Cl = load_coefficients['Cl']
    Cm = load_coefficients['Cm']

    BqBq = 1/2*rho*V**2*B*np.array([[ 0 ],
                                    [ D/B*Cd ],
                                    [ Cl ],
                                    [ -B*Cm ]])
    return BqBq

def loadvector(T_el, Bq, T_wind, L, static=False):

    G = np.zeros([12,4])
    G[0,0] = L/2
    G[1,1] = L/2
    G[2,2] = L/2
    G[3,3] = L/2
    G[6,0] = L/2
    G[7,1] = L/2
    G[8,2] = L/2
    G[9,3] = L/2
    G[4,2] = -L**2/12
    G[5,1] = L**2/12
    G[10,2] = L**2/12
    G[11,1] = -L**2/12

    # Transform from wind coordinates to local element coordinates
    T = T_el @ T_wind.T

    T_full = blkdiag(T_el, 4)   # Block diagonal - repeated 4 times to transform both trans and rot DOFs at each node (2+2)

    # T_full.T transforms L-->G
    if static is False:
        R = T_full.T @ G @ Bq @ T
    else:
        R = T_full.T @ G @ Bq

    R1 = R[0:6]     # Element node 1
    R2 = R[6:12]    # Element node 2

    return R1, R2

def windaction(omega, S, load_coefficients, elements, T_wind,
               phi, B, D, U, omega_reduced=None, rho=1.225, print_progress=True,
               section_lookup=None, nodes=None, admittance=None):

    if nodes is None:
        nodes = list(set([a for b in [el.nodes for el in elements] for a in b]))

    n_dofs = 6

    # Ensure that first omega value is not 0 when using logspace omega axis
    if omega_reduced is None:
        if np.min(omega) == 0:
            omega_sorted = np.sort(omega)
            omega_start = omega_sorted[1]
        else:
            omega_start = np.min(omega)

        omega_reduced = np.logspace(np.log10(omega_start), np.log10(np.max(omega)), num=50)     # A log frequency axis that is used to obtain the cross-spectral density matrix

    genSqSq_reduced = np.zeros([phi.shape[1], phi.shape[1], len(omega_reduced)])    # Initialize the cross-spectral density matrix

    # Establish RG matrix (common for all freqs)
    if section_lookup is None:
        lc_fun = lambda el: load_coefficients
        B_fun = lambda el: B
        D_fun = lambda el: D
        admittance_fun = lambda el: admittance
    else:
        def get_sec(el):
            for key in section_lookup:
                if el in section_lookup[key]:
                    return key

        lc_fun = lambda el: load_coefficients[get_sec(el)]
        B_fun = lambda el: B[get_sec(el)]
        D_fun = lambda el: D[get_sec(el)]

    if admittance is None:  # omit the frequency loop if admittance is not included - faster!
        RG = np.zeros([len(nodes)*n_dofs, 3])
        for el in elements:
            node1_dofs = el.nodes[0].global_dofs
            node2_dofs = el.nodes[1].global_dofs

            mean_wind = U(el.get_cog())
            Vn = normal_wind(T_wind, el.T0)*mean_wind   # Find the normal wind
            BqBq = loadmatrix_fe(Vn, lc_fun(el), rho, B_fun(el), D_fun(el))
            R1, R2 = loadvector(el.T0, BqBq(1), T_wind, el.L)   # Obtain the load vector for each element

            RG[node1_dofs, :] = RG[node1_dofs, :] + R1      # Add the contribution from the element (end 1) to the system
            RG[node2_dofs, :] = RG[node2_dofs, :] + R2      # Add the contribution from the element (end 2) to the system

        # Make block matrix
        RG_block = np.zeros([6*len(nodes), 3*len(nodes)])

        for node in nodes:
            ix = node.index
            n = np.r_[6*ix:6*ix+6]
            m = np.r_[3*ix:3*ix+3]
            RG_block[np.ix_(n,m)] = RG[n,:]     #verified with MATLAB version for beam example

        for k, omega_k in enumerate(omega_reduced):
            if print_progress:
                pp(k+1, len(omega_reduced), sym='=', postfix=' ESTABLISHING WIND EXCITATION')
                print('')

            phiT_RG_block = phi.T @ RG_block
            genSqSq_reduced[:, :, k] = phiT_RG_block @ S(omega_k) @ phiT_RG_block.T     # to modal coordinates

    else:   # admittance is given - triple loop (the old way, slower)
        admittance_fun = lambda el: admittance[get_sec(el)]

        for k, omega_k in enumerate(omega_reduced):
            if print_progress:
                pp(k+1, len(omega_reduced), sym='=', postfix=' ESTABLISHING WIND EXCITATION')
                print('')

            # Establish RG matrix
            RG = np.zeros([len(nodes)*n_dofs, 3])

            for el in elements:
                node1_dofs = el.nodes[0].global_dofs
                node2_dofs = el.nodes[1].global_dofs

                mean_wind = U(el.get_cog())
                Vn = normal_wind(T_wind, el.T0)*mean_wind   # Find the normal wind
                BqBq = loadmatrix_fe(Vn, lc_fun(el), rho, B_fun(el), D_fun(el), admittance=admittance_fun(el))
                R1, R2 = loadvector(el.T0, BqBq(omega_k), T_wind, el.L)     # Obtain the load vector for each element

                RG[node1_dofs, :] = RG[node1_dofs, :] + R1      # Add the contribution from the element (end 1) to the system
                RG[node2_dofs, :] = RG[node2_dofs, :] + R2      # Add the contribution from the element (end 2) to the system

            # Make block matrix
            RG_block = np.zeros([6*len(nodes), 3*len(nodes)])

            for node in nodes:
                ix = node.index
                n = np.r_[6*ix:6*ix+6]
                m = np.r_[3*ix:3*ix+3]
                RG_block[np.ix_(n,m)] = RG[n,:]     #verified with MATLAB version for beam example

            phiT_RG_block = phi.T @ RG_block
            genSqSq_reduced[:, :, k] = phiT_RG_block @ S(omega_k) @ phiT_RG_block.T     # to modal coordinates

    # Interpolate results to full frequency axis
    genSqSq = interp1d(omega_reduced, genSqSq_reduced, kind='quadratic', axis=2, fill_value=0, bounds_error=False)

    return genSqSq

def windaction_static(load_coefficients, elements, T_wind,
                      phi, B, D, U, rho=1.225, print_progress=True,
                      section_lookup=None, nodes=None):

    if nodes is None:
        nodes = list(set([a for b in [el.nodes for el in elements] for a in b]))

    n_dofs = 6

    if section_lookup is None:
        lc_fun = lambda el: load_coefficients
        B_fun = lambda el: B
        D_fun = lambda el: D
    else:
        def get_sec(el):
            for key in section_lookup:
                if el in section_lookup[key]:
                    return key

        lc_fun = lambda el: load_coefficients[get_sec(el)]
        B_fun = lambda el: B[get_sec(el)]
        D_fun = lambda el: D[get_sec(el)]

    # Establish RG matrix
    RG = np.zeros([len(nodes)*n_dofs])

    for el in elements:
        node1_dofs = el.nodes[0].global_dofs
        node2_dofs = el.nodes[1].global_dofs

        mean_wind = U(el.get_cog())
        Vn = normal_wind(T_wind, el.T0)*mean_wind   # Find the normal wind
        BqBq = loadmatrix_fe_static(Vn, lc_fun(el), rho, B_fun(el), D_fun(el))
        R1, R2 = loadvector(el.T0, BqBq, T_wind, el.L, static=True)     # Obtain the load vector for each element

        RG[node1_dofs] = RG[node1_dofs] + R1[:,0]   # Add the contribution from the element (end 1) to the system
        RG[node2_dofs] = RG[node2_dofs] + R2[:,0]   # Add the contribution from the element (end 2) to the system

    # Make block matrix
    RG_block = np.zeros([6*len(nodes)])

    for node in nodes:
        ix = node.index
        n = np.r_[6*ix:6*ix+6]
        RG_block[np.ix_(n)] = RG[n]     #verified with MATLAB version for beam example

    genSqSq = phi.T @ RG_block

    return genSqSq

def K_from_ad(ad, V, w, B, rho):
    if w==0:
        k = np.zeros([3,3])
    else:
        v = V/(B*w)     # reduced velocity

        k = (0.5*rho*B**2*w**2 *
             np.vstack([[ad['P4'](v), ad['P6'](v), -B*ad['P3'](v)],
                        [ad['H6'](v), ad['H4'](v), -B*ad['H3'](v)],
                        [-B*ad['A6'](v), -B*ad['A4'](v), B**2*ad['A3'](v)]]))

    return k


def C_from_ad(ad, V, w, B, rho):
    if w==0:
        c = np.zeros([3,3])
    else:
        v = V/(B*w)     # reduced velocity

        c = (0.5*rho*B**2*w *
             np.vstack([[ad['P1'](v), ad['P5'](v), -B*ad['P2'](v)],
                        [ad['H5'](v), ad['H1'](v), -B*ad['H2'](v)],
                        [-B*ad['A5'](v), -B*ad['A1'](v), B**2*ad['A2'](v)]]))

    return c
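A minimal sketch of the two section-matrix helpers above (illustrative only, not part of the packaged file), using the flat-plate derivatives and made-up section data:

    from wawi.wind import flatplate_ads, K_from_ad, C_from_ad

    ad = flatplate_ads()
    V, B, rho, w = 40.0, 20.0, 1.225, 0.8       # wind speed, width, air density, frequency [rad/s]

    Kae = K_from_ad(ad, V, w, B, rho)           # 3x3 aeroelastic stiffness, DOFs [y, z, theta]
    Cae = C_from_ad(ad, V, w, B, rho)           # 3x3 aeroelastic damping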

def phi_aero_sum(mat, phi, x):
    n_modes = phi.shape[1]
    n_points = len(x)

    mat_int = np.zeros([n_modes, n_modes, n_points])

    for p in range(n_points):
        phi_point = phi[p*6+1:p*6+4, :]
        mat_int[:, :, p] = phi_point.T @ mat @ phi_point

    mat = np.trapz(mat_int, x=x, axis=2)

    return mat


def function_sum(fun, const, fun_factor=1):
    def fsum(x):
        if fun is None:
            return const
        else:
            return fun(x)*fun_factor + const

    return fsum


def get_aero_cont_adfun(ad_dict_fun, V, B, rho, phi, x):
    def K(w):
        n_modes = phi.shape[1]
        n_points = len(x)

        mat_int = np.zeros([n_modes, n_modes, n_points])

        for p in range(n_points):
            phi_point = phi[p*6+1:p*6+4, :]
            kae = K_from_ad(ad_dict_fun(x[p]), V, w, B, rho)
            mat_int[:, :, p] = phi_point.T @ kae @ phi_point

        return np.trapz(mat_int, x=x, axis=2)

    def C(w):
        n_modes = phi.shape[1]
        n_points = len(x)

        mat_int = np.zeros([n_modes, n_modes, n_points])

        for p in range(n_points):
            phi_point = phi[p*6+1:p*6+4, :]
            kae = C_from_ad(ad_dict_fun(x[p]), V, w, B, rho)
            mat_int[:, :, p] = phi_point.T @ kae @ phi_point

        return np.trapz(mat_int, x=x, axis=2)

    return K, C


def get_aero_cont_addict(ad_dict, V, B, rho, phi, x):
    def K(w):
        kae = K_from_ad(ad_dict, V, w, B, rho)
        return phi_aero_sum(kae, phi, x)

    def C(w):
        cae = C_from_ad(ad_dict, V, w, B, rho)
        return phi_aero_sum(cae, phi, x)

    return K, C
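A minimal sketch of the dictionary-based variant above (illustrative only, not part of the packaged file); phi is a made-up mode shape matrix with 6 DOFs per point along the girder axis x:

    import numpy as np
    from wawi.wind import flatplate_ads, get_aero_cont_addict

    x = np.linspace(0, 100, 21)                 # girder axis coordinates [m]
    n_modes = 3
    phi = np.zeros([len(x)*6, n_modes])
    for m in range(n_modes):                    # made-up vertical mode shapes (z-DOF of each point)
        phi[2::6, m] = np.sin((m+1)*np.pi*x/x[-1])

    K, C = get_aero_cont_addict(flatplate_ads(), 40.0, 20.0, 1.225, phi, x)
    Kae = K(0.8)    # generalized (n_modes x n_modes) aeroelastic stiffness at omega = 0.8 rad/s
    Cae = C(0.8)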

def itflutter_cont(Ms, Cs, Ks, phi, x, ad_dict, B, V=0.0, rho=1.225, dV=1,
                   overshoot_factor=0.5, itmax={}, omega_ref=None,
                   tol={}, print_progress=True, keep_all=False, track_by_psi=True):

    if callable(ad_dict):
        get_aero = get_aero_cont_adfun
    else:
        get_aero = get_aero_cont_addict

    itmax_ = {'V': 50, 'f': 15}
    itmax_.update(**itmax)
    itmax = dict(itmax_)

    tol_ = {'V': 1e-3, 'f': 1e-4}
    tol_.update(**tol)
    tol = tol_

    res = dict()
    res['V'] = []
    res['lambd'] = []
    res['critical_mode'] = []
    res['critical_psi'] = []

    converged = False
    psi_prev = None

    if omega_ref is None:
        A = statespace(Ks, Cs, Ms)
        lambd_ref, psi = np.linalg.eig(A)
        omega_initial = np.sort(np.abs(np.imag(lambd_ref)))[::2]
        omega_ref = omega_initial[0]

    for it_vel in range(itmax['V']):
        Kae, Cae = get_aero(ad_dict, V, B, rho, phi, x)
        getK = function_sum(Kae, Ks, fun_factor=-1)
        getC = function_sum(Cae, Cs, fun_factor=-1)
        getM = function_sum(None, Ms, fun_factor=-1)

        lambd, psi, not_converged = iteig(getK, getC, getM, tol=tol['f'],
                                          keep_full=True, mac_min=0.0, itmax=itmax['f'])

        if len(not_converged)>0:
            lambd[not_converged] = -np.inf + 0j
            if print_progress:
                if len(not_converged)<10:
                    nc_modes = 'index ' + ', '.join([str(i) for i in not_converged])
                else:
                    nc_modes = '>10'
                print(f'** Non-converged modes ({nc_modes}) from iterative eigensolution disregarded! **')

        if it_vel!=0 and track_by_psi:
            ixs, __, __, __ = restructure_as_ref(psi_prev, psi)

            psi = psi[:, ixs]
            lambd = lambd[ixs]

        psi_prev = psi*1

        critical_mode = np.argmax(np.real(lambd))
        real_lambd = np.max(np.real(lambd))
        critical_omega = np.abs(np.imag(lambd[critical_mode]))

        if keep_all or real_lambd<=0:
            res['critical_mode'].append(critical_mode)
            res['lambd'].append(lambd)
            res['V'].append(V)
            res['critical_psi'].append(psi[:,critical_mode])

        if dV < tol['V'] and real_lambd<=0:
            converged = True
            if print_progress:
                print(conv_text)
                print(f'Flutter estimated to occur at V = {V:.2f} m/s ({critical_omega:.2f} rad/s) ==> v = {V/(B*critical_omega):.2f})\n')

            break
        elif real_lambd<0:
            if print_progress:
                print(f'Increasing velocity V = {V:.2f} --> {V+dV:.2f}.')
            V = V + dV
        else:
            if print_progress:
                print(f'Overshot. Reducing velocity V = {V:.2f} --> {V-dV/2:.2f}. Reducing step size dV = {dV:.2f} --> {dV/2:.2f}')

            dV = overshoot_factor*dV    # adjusting the velocity increment, and step backwards
            V = V - dV

    if not converged and print_progress:
        print('Not able to converge within specified maximum iterations for specified tolerance criteria.')

    res = {key: np.array(res[key]) for key in ['critical_mode', 'critical_psi', 'V', 'lambd']}

    return res

def itflutter_cont_naive(Ms, Cs, Ks, phi, x, ad_dict, B, V=0.0, rho=1.225, dV=1,
                         overshoot_factor=0.5, itmax={}, tol={}, print_progress=True):

    if callable(ad_dict):
        get_aero = get_aero_cont_adfun
    else:
        get_aero = get_aero_cont_addict

    itmax_ = {'V': 50, 'f': 15}
    itmax_.update(**itmax)
    itmax = itmax_

    tol_ = {'V': 1e-3, 'f': 1e-4}
    tol_.update(**tol)
    tol = tol_

    res = dict()
    res['V'] = []
    res['lambd'] = []
    res['critical_mode'] = []
    res['critical_psi'] = []

    converged = False

    for it_vel in range(itmax['V']):
        Kae, Cae = get_aero(ad_dict, V, B, rho, phi, x)
        getK = function_sum(Kae, Ks, fun_factor=-1)
        getC = function_sum(Cae, Cs, fun_factor=-1)
        getM = function_sum(None, Ms, fun_factor=-1)

        lambd, psi = iteig_naive(getK, getC, getM, tol=tol['f'], itmax=itmax['f'])

        complex_ix = np.imag(lambd) != 0

        critical_mode = np.argmax(np.real(lambd[complex_ix]))
        critical_mode = np.where(complex_ix)[0][critical_mode]

        real_lambd = np.max(np.real(lambd))
        critical_omega = np.abs(np.imag(lambd[critical_mode]))

        if real_lambd<=0:
            res['critical_mode'].append(critical_mode)
            res['lambd'].append(lambd)
            res['V'].append(V)
            res['critical_psi'].append(psi[:,critical_mode])

        if dV < tol['V'] and real_lambd<=0:
            converged = True
            if print_progress:
                print(conv_text)
                print(f'Flutter estimated to occur at V = {V:.2f} m/s ({critical_omega:.2f} rad/s) ==> v = {V/(B*critical_omega):.2f})\n')

            break
        elif real_lambd<=0:
            if print_progress:
                print(f'Increasing velocity V = {V:.2f} --> {V+dV:.2f}.')
            V = V + dV
        else:
            if print_progress:
                print(f'Overshot. Reducing velocity V = {V:.2f} --> {V-dV/2:.2f}. Reducing step size dV = {dV:.2f} --> {dV/2:.2f}')

            dV = overshoot_factor*dV    # adjusting the velocity increment, and step backwards
            V = V - dV

    if not converged and print_progress:
        print('Not able to converge within specified maximum iterations for specified tolerance criteria.')

    res = {key: np.array(res[key]) for key in ['critical_mode', 'critical_psi', 'V', 'lambd']}

    return res
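A minimal sketch of the flutter search above (illustrative only, not part of the packaged file): a two-mode heave/torsion section model with made-up structural properties and flat-plate derivatives. Whether and where flutter is found depends entirely on these made-up numbers.

    import numpy as np
    from wawi.wind import flatplate_ads, itflutter_cont

    x = np.linspace(0, 1, 2)                    # unit-length 'girder', two points
    phi = np.zeros([len(x)*6, 2])
    phi[2::6, 0] = 1.0                          # mode 1: pure heave (z)
    phi[3::6, 1] = 1.0                          # mode 2: pure torsion (theta)

    Ms = np.diag([1.0e4, 1.0e5])                                  # modal mass (made up)
    Ks = Ms @ np.diag([(2*np.pi*0.10)**2, (2*np.pi*0.30)**2])     # 0.10 Hz and 0.30 Hz
    Cs = 2*0.02*np.sqrt(Ks @ Ms)                                  # about 2% of critical damping

    res = itflutter_cont(Ms, Cs, Ks, phi, x, flatplate_ads(), B=20.0,
                         V=10.0, dV=5.0, print_progress=False)
    print(res['V'])                             # wind speeds visited with a stable (damped) system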