wawi 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

wawi/wind.py ADDED
@@ -0,0 +1,1108 @@
1
+ import numpy as np
2
+ from scipy.interpolate import interp1d
3
+ from .modal import statespace, iteig, restructure_as_ref, iteig_naive
4
+ from .tools import print_progress as pp
5
+ from scipy.special import jv as besselj, yv as bessely
6
+ from .general import rodrot, blkdiag
7
+ from .plot import plot_ads
8
+
9
+ conv_text='''
10
+ -----------------------------------------------------
11
+ | |
12
+ | ~ ~ ~~~ ~ ~~ ~ /^^^^^^^^^^^^\ 88ooo... . . . |
13
+ | ~ ~ ~ ~~ ~ ~ ~\____________/ 88ooo¨¨¨¨ ¨¨ |
14
+ | CONVERGED! |
15
+ -----------------------------------------------------
16
+ '''
17
+
18
+ beaufort_dict = {
19
+ 'calm': [0, 0.5],
20
+ 'light air': [0.5, 1.5],
21
+ 'light breeze': [1.6, 3.3],
22
+ 'gentle breeze': [3.4, 5.5],
23
+ 'moderate breeze': [5.6, 7.9],
24
+ 'fresh breeze': [8, 10.7],
25
+ 'strong breeze': [10.8, 13.8],
26
+ 'moderate gale': [13.9, 17.1],
27
+ 'gale': [17.2, 20.7],
28
+ 'strong gale': [20.8, 24.4],
29
+ 'storm': [24.5, 28.4],
30
+ 'violent storm': [28.5, 32.6],
31
+ 'hurricane': [32.7, np.inf]
32
+ }
33
+
34
+ def get_beaufort(U0):
35
+ return [key for key in beaufort_dict if inrange(U0, beaufort_dict[key])][0]
36
+
37
+ def inrange(num, rng):
38
+ return num<=np.max(rng) and num>=np.min(rng)
39
+
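# Example (illustrative): classify a mean wind speed on the Beaufort scale.
print(get_beaufort(9.5))    # 'fresh breeze'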
40
+ class LoadCoefficients:
41
+ keys = ['Cd', 'Cm', 'Cl', 'dCd', 'dCm', 'dCl']
42
+
43
+ def __repr__(self):
44
+ return 'LoadCoefficients (Cd, Cl, Cm, dCd, dCl, dCm)'
45
+
46
+ def __str__(self):
47
+ return f'Cd:{self.Cd}, dCd:{self.dCd}, Cl:{self.Cl}, dCl:{self.dCl}, Cm:{self.Cm}, dCm:{self.dCm}'
48
+
49
+ def __init__(self, Cd=None, dCd=None, Cl=None, dCl=None, Cm=None, dCm=None, fill_empty=True):
50
+ self.Cd = Cd
51
+ self.dCd = dCd
52
+ self.Cl = Cl
53
+ self.dCl = dCl
54
+ self.Cm = Cm
55
+ self.dCm = dCm
56
+
57
+ if fill_empty:
58
+ self.fill_empty_with_zeros()
59
+
60
+ def fill_empty_with_zeros(self):
61
+ for key in self.keys:
62
+ if getattr(self, key) is None:
63
+ setattr(self, key, 0)
64
+
65
+ def to_dict(self):
66
+ return {key: getattr(self, key) for key in self.keys}
67
+
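# Example (illustrative coefficient values): unspecified coefficients are filled with zeros by default.
lc = LoadCoefficients(Cd=0.7, Cl=0.1, dCl=3.0, Cm=0.02, dCm=1.12)
print(lc)            # dCd defaults to 0
print(lc.to_dict())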
68
+ class ADs:
69
+ ad_keys = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6',
70
+ 'H1', 'H2', 'H3', 'H4', 'H5', 'H6',
71
+ 'A1', 'A2', 'A3', 'A4', 'A5', 'A6']
72
+
73
+ P1, P2, P3, P4, P5, P6 = None, None, None, None, None, None
74
+ H1, H2, H3, H4, H5, H6 = None, None, None, None, None, None
75
+ A1, A2, A3, A4, A5, A6 = None, None, None, None, None, None
76
+
77
+ def __init__(self, ad_type='not specified',
78
+ P1=None, P2=None, P3=None, P4=None, P5=None, P6=None,
79
+ H1=None, H2=None, H3=None, H4=None, H5=None, H6=None,
80
+ A1=None, A2=None, A3=None, A4=None, A5=None, A6=None):
81
+
82
+ self.type = ad_type
83
+
84
+ self.P1 = P1
85
+ self.P2 = P2
86
+ self.P3 = P3
87
+ self.P4 = P4
88
+ self.P5 = P5
89
+ self.P6 = P6
90
+
91
+ self.H1 = H1
92
+ self.H2 = H2
93
+ self.H3 = H3
94
+ self.H4 = H4
95
+ self.H5 = H5
96
+ self.H6 = H6
97
+
98
+ self.A1 = A1
99
+ self.A2 = A2
100
+ self.A3 = A3
101
+ self.A4 = A4
102
+ self.A5 = A5
103
+ self.A6 = A6
104
+
105
+ def plot(self, v=np.arange(0,5,0.01), **kwargs):
106
+ return plot_ads(self.to_dict(), v, **kwargs)
107
+
108
+
109
+ def to_dict(self):
110
+ return {key: getattr(self, key) for key in self.ad_keys}
111
+
112
+ def evaluate_all(self, v):
113
+ AD_evaluated = dict()
114
+ for key in self.ad_keys:
115
+ AD_evaluated[key] = getattr(self, key)(v)
116
+
117
+ return AD_evaluated
118
+
119
+
120
+ def evaluate(self, key, v):
121
+ AD_evaluated = getattr(self, key)(v)
122
+
123
+ return AD_evaluated
124
+
125
+ def flatplate_ads():
126
+
127
+ ad_dict = dict()
128
+
129
+ def F(v):
130
+ J1 = besselj(1, 0.5/v)
131
+ Y1 = bessely(1, 0.5/v)
132
+ J0 = besselj(0, 0.5/v)
133
+ Y0 = bessely(0, 0.5/v)
134
+
135
+ a = J1 + Y0
136
+ b = Y1 - J0
137
+ c = a**2 + b**2
138
+
139
+ return (J1*a + Y1*b)/c
140
+
141
+ def G(v):
142
+ J1 = besselj(1, 0.5/v)
143
+ Y1 = bessely(1, 0.5/v)
144
+ J0 = besselj(0, 0.5/v)
145
+ Y0 = bessely(0, 0.5/v)
146
+
147
+ a = J1 + Y0
148
+ b = Y1 - J0
149
+ c = a**2 + b**2
150
+ return -(J1*J0 + Y1*Y0)/c
151
+
152
+ ad_dict['H1'] = lambda v: -2*np.pi*F(v)*v
153
+ ad_dict['H2'] = lambda v: np.pi/2*(1+F(v)+4*G(v)*v)*v
154
+ ad_dict['H3'] = lambda v: 2*np.pi*(F(v)*v-G(v)/4)*v
155
+ ad_dict['H4'] = lambda v: np.pi/2*(1+4*G(v)*v)
156
+ ad_dict['H5'] = lambda v: 0*v
157
+ ad_dict['H6'] = lambda v: 0*v
158
+
159
+ ad_dict['A1'] = lambda v: -np.pi/2*F(v)*v
160
+ ad_dict['A2'] = lambda v: -np.pi/8*(1-F(v)-4*G(v)*v)*v
161
+ ad_dict['A3'] = lambda v: np.pi/2*(F(v)*v-G(v)/4)*v
162
+ ad_dict['A4'] = lambda v: np.pi/2*G(v)*v
163
+ ad_dict['A5'] = lambda v: 0*v
164
+ ad_dict['A6'] = lambda v: 0*v
165
+
166
+ ad_dict['P1'] = lambda v: 0*v
167
+ ad_dict['P2'] = lambda v: 0*v
168
+ ad_dict['P3'] = lambda v: 0*v
169
+ ad_dict['P4'] = lambda v: 0*v
171
+ ad_dict['P5'] = lambda v: 0*v
172
+ ad_dict['P6'] = lambda v: 0*v
173
+
174
+ return ad_dict
175
+
176
+
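# Example: wrap the theoretical flat-plate ADs in an ADs object and evaluate them at a
# reduced velocity v = V/(B*omega).
AD = ADs(ad_type='flat plate', **flatplate_ads())
print(AD.evaluate('H1', 2.0))       # single AD at v = 2
print(AD.evaluate_all(2.0)['A2'])   # all 18 ADs at once, picking one
# AD.plot() plots the ADs over a range of reduced velocities (uses wawi.plot / matplotlib)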
177
+ def quasisteady_ads(D, B, load_coefficients):
178
+ # Assuming load coeffs are normalized wrt. both D (Cd) and B (Cl and Cm) and ADs are
179
+ # normalized using B only.
180
+
181
+ if type(load_coefficients)==dict:
182
+ Cd = load_coefficients['Cd']
183
+ dCd = load_coefficients['dCd']
184
+ Cl = load_coefficients['Cl']
185
+ dCl = load_coefficients['dCl']
186
+ Cm = load_coefficients['Cm']
187
+ dCm = load_coefficients['dCm']
188
+ else:
189
+ Cd, dCd = load_coefficients.Cd, load_coefficients.dCd
190
+ Cl, dCl = load_coefficients.Cl, load_coefficients.dCl
191
+ Cm, dCm = load_coefficients.Cm, load_coefficients.dCm
192
+
193
+ ad_dict = dict()
194
+ ad_dict['P1'], ad_dict['P2'], ad_dict['P3'] = lambda v: -2*Cd*D/B*v, lambda v: 0*v, lambda v: dCd*D/B*v**2
195
+ ad_dict['P4'], ad_dict['P5'], ad_dict['P6'] = lambda v: 0*v, lambda v: (Cl-dCd*D/B)*v, lambda v: 0*v
196
+
197
+ ad_dict['H1'], ad_dict['H2'], ad_dict['H3'] = lambda v: -(dCl+Cd*D/B)*v, lambda v: 0*v, lambda v: dCl*v**2
198
+ ad_dict['H4'], ad_dict['H5'], ad_dict['H6'] = lambda v: 0*v, lambda v: -2*Cl*v, lambda v: 0*v
199
+
200
+ ad_dict['A1'], ad_dict['A2'], ad_dict['A3'] = lambda v: -dCm*v, lambda v: 0*v, lambda v: dCm*v**2
201
+ ad_dict['A4'], ad_dict['A5'], ad_dict['A6'] = lambda v: 0*v, lambda v: -2*Cm*v, lambda v: 0*v
202
+
203
+ return ad_dict
204
+
205
+
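# Example (illustrative coefficient values): quasi-steady ADs from static load coefficients.
# Note the normalization stated above: Cd is normalized with D, Cl and Cm with B, while the
# resulting ADs are normalized with B only.
lc = LoadCoefficients(Cd=0.7, dCd=0.0, Cl=0.1, dCl=3.0, Cm=0.02, dCm=1.12)
ad = quasisteady_ads(D=3.0, B=20.0, load_coefficients=lc)
print(ad['H1'](2.0), ad['A2'](2.0))   # evaluated at reduced velocity v = 2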
206
+ def compute_aero_matrices(U, AD, B, elements, T_wind, phi,
207
+ omega_reduced=None, print_progress=False, rho=1.225):
208
+
209
+ if omega_reduced is None:
210
+ omega_reduced = np.linspace(0.015, 2.0, 75)
211
+
212
+ n_modes = phi.shape[1]
213
+
214
+ Kae = np.zeros([n_modes, n_modes, len(omega_reduced)])
215
+ Cae = np.zeros([n_modes, n_modes, len(omega_reduced)])
216
+
217
+ for element_ix, element in enumerate(elements):
218
+
219
+ if callable(U):
220
+ U_el_glob = U(element.get_cog())
221
+ else:
222
+ U_el_glob = U*1
223
+
224
+ U_el = normal_wind(T_wind, element.T0, U=U_el_glob)
225
+
226
+ v = U_el/(B*omega_reduced)
227
+
228
+ for k, v_k in enumerate(v):
229
+ k_aero, c_aero = element_aero_mats(B, omega_reduced[k],
230
+ AD.evaluate_all(v_k),
231
+ element.L, T=element.T0,
232
+ phi=phi[element.global_dofs, :], rho=rho)
233
+
234
+ Kae[:, :, k] = Kae[:, :, k] + k_aero
235
+ Cae[:, :, k] = Cae[:, :, k] + c_aero
236
+
237
+ if print_progress:
238
+ pp(element_ix+1, len(elements), sym='=', postfix=' ESTABLISHING WIND EXCITATION')
239
+ print('')
240
+
241
+ Cae = interp1d(omega_reduced, Cae, kind='quadratic', fill_value='extrapolate', bounds_error=False)
242
+ Kae = interp1d(omega_reduced, Kae, kind='quadratic', fill_value='extrapolate', bounds_error=False)
243
+
244
+
245
+ return Kae, Cae
246
+
247
+
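# Sketch of a typical call. In practice `elements` come from a beef model; the stand-in
# element class below is hypothetical and only provides the attributes this function
# actually touches (T0, L, get_cog, global_dofs). All numbers are illustrative.
class _DummyElement:
    def __init__(self, ix, x1, x2):
        self.T0 = np.array([[0., 1., 0.],     # local x along global y => axis normal to the wind
                            [-1., 0., 0.],
                            [0., 0., 1.]])
        self.L = np.linalg.norm(np.array(x2) - np.array(x1))
        self._cog = 0.5*(np.array(x1) + np.array(x2))
        self.global_dofs = np.r_[6*ix:6*ix+6, 6*(ix+1):6*(ix+1)+6]

    def get_cog(self):                        # used when U is given as a callable
        return self._cog

x_nodes = [[0, 20*i, 50] for i in range(11)]                                   # 11 nodes along global y
elements = [_DummyElement(i, x_nodes[i], x_nodes[i+1]) for i in range(10)]     # 10 elements

phi = np.zeros([6*len(x_nodes), 2])
s = np.linspace(0, np.pi, len(x_nodes))
phi[2::6, 0] = np.sin(s)      # mode 1: vertical half-sine
phi[3::6, 1] = np.sin(s)      # mode 2: torsional half-sine

AD = ADs(ad_type='flat plate', **flatplate_ads())
Kae, Cae = compute_aero_matrices(U=30.0, AD=AD, B=20.0, elements=elements,
                                 T_wind=np.eye(3), phi=phi)
print(Kae(0.8).shape)         # (2, 2): modal aerodynamic stiffness at omega = 0.8 rad/s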
248
+ def compute_aero_matrices_sets(U, AD, B, elements, T_wind, phi_dict,
249
+ omega_reduced=None, omega=None, print_progress=False, sets=None):
250
+
251
+ if sets is None:
252
+ sets = elements.keys()
253
+
254
+ if omega is None:
255
+ return_as_function = True
256
+ else:
257
+ return_as_function = False
+ first_is_zero = omega[0]==0.0
258
+ if first_is_zero:
259
+ omega = omega[1:]
260
+
261
+ if omega_reduced is None:
262
+ omega_reduced = np.logspace(np.log10(0.01), np.log10(2), 100) #standard values should be reasonable in most typical cases - change later!
263
+
264
+ first_key = [str(key) for key in sets][0]
265
+ n_modes = np.shape(phi_dict[first_key])[1]
266
+
267
+ Kae = np.zeros([n_modes, n_modes, len(omega_reduced)])
268
+ Cae = np.zeros([n_modes, n_modes, len(omega_reduced)])
269
+
270
+ for set_name in sets:
271
+ B_set = B[set_name]
272
+ AD_set = AD[set_name]
273
+ phi = phi_dict[set_name]
274
+ elements_set = elements[set_name]
275
+
276
+ for element_ix, element in enumerate(elements_set):
277
+ T_el = element.T0
278
+ U_el = normal_wind(T_wind, T_el, U=U)
279
+ v = U_el/(B_set*omega_reduced)
280
+
281
+ dof_range = np.hstack([element.nodes[0].global_dofs, element.nodes[1].global_dofs])
282
+
283
+ for k, v_k in enumerate(v):
284
+ k_aero, c_aero = element_aero_mats(B_set, omega_reduced[k], AD_set.evaluate_all(v_k), element.L, T=T_el, phi=phi[dof_range, :])
285
+ Kae[:, :, k] += k_aero
286
+ Cae[:, :, k] += c_aero
287
+
288
+ if print_progress:
289
+ pp(element_ix+1, len(elements_set), sym='>', postfix=f' finished with set "{set_name}".')
290
+
291
+ if print_progress:
292
+ print('')
293
+
294
+ Cae = interp1d(omega_reduced, Cae, kind='quadratic',fill_value='extrapolate')
295
+ Kae = interp1d(omega_reduced, Kae, kind='quadratic', fill_value='extrapolate')
296
+
297
+ if return_as_function:
298
+ return Kae, Cae
299
+ else:
300
+ Cae = Cae(omega)
301
+ Kae = Kae(omega)
302
+
303
+ if first_is_zero:
304
+ Cae = np.insert(Cae, 0, Cae[:,:,0]*0, axis=2)
305
+ Kae = np.insert(Kae, 0, Kae[:,:,0]*0, axis=2)
306
+
307
+ return Kae, Cae
308
+
309
+ def mvregress_ads(beta):
310
+ ad_dict = dict()
311
+ ad_keys = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6',
312
+ 'H1', 'H2', 'H3', 'H4', 'H5', 'H6',
313
+ 'A1', 'A2', 'A3', 'A4', 'A5', 'A6']
314
+
315
+ for key in ad_keys:
316
+ ad_dict[key] = lambda v, key=key: 0
317
+
318
+ #TODO: FINALIZE, NOT FINISHED
319
+
320
+ return ad_dict
321
+
322
+
323
+ def f_rf_fun_legacy(a, d, v):
324
+ N = len(a)
325
+ f = 0j
326
+ for l in range(0, 3):
327
+ f = f + a[l] * (1j/v)**l
328
+
329
+ for l in range(0, N-3):
330
+ f = f + a[l+2]*(1j/v) / ((1j/v + d[l]))
331
+
332
+ f = f*v**2
333
+ return f
334
+
335
+
336
+ def f_rf_fun(a, d, v):
337
+ N = len(a)
338
+ f = np.array(a[0])*0j
339
+
340
+ for l in range(0, 3):
341
+ f = f + a[l] * (1j/v)**l
342
+
343
+ for l in range(0, N-3):
344
+ f = f + a[l+2]*(1j/v) / ((1j/v + d[l]))
345
+
346
+ f = f*v**2
347
+
348
+ return f
349
+
350
+
351
+ def rf_ads(a, d):
352
+ # B assumed to be implicitly included in RF factors
353
+ ad_dict = dict()
354
+ ad_keys = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6',
355
+ 'H1', 'H2', 'H3', 'H4', 'H5', 'H6',
356
+ 'A1', 'A2', 'A3', 'A4', 'A5', 'A6']
357
+
358
+ imag_component_ad = ['P1', 'P2', 'P5', 'H1', 'H2', 'H5', 'A1', 'A2', 'A5']
359
+
360
+ position_dict = {'P1': [0,0], 'P2': [0,2], 'P3': [0,2], 'P4': [0,0], 'P5': [0,1], 'P6': [0,1],
361
+ 'H1': [1,1], 'H2': [1,2], 'H3': [1,2], 'H4': [1,1], 'H5': [1,0], 'H6': [1,0],
362
+ 'A1': [2,1], 'A2': [2,2], 'A3': [2,2], 'A4': [2,1], 'A5': [2,0], 'A6': [2,0]}
363
+
364
+ for key in ad_keys:
365
+ row = position_dict[key][0]
366
+ col = position_dict[key][1]
367
+ a_key = [ai[row, col] for ai in a]
368
+
369
+ if key in imag_component_ad:
370
+ ad_dict[key] = lambda v, a=a_key: np.imag(f_rf_fun_legacy(a, d, v))
371
+ else:
372
+ ad_dict[key] = lambda v, a=a_key: np.real(f_rf_fun_legacy(a, d, v))
373
+
374
+ return ad_dict
375
+
376
+
377
+ def distribute_to_dict(prefix, array, count_start=1):
378
+ array_dict = dict()
379
+ for ix,array_i in enumerate(array):
380
+ key = prefix + str(ix+count_start)
381
+ array_dict[key] = array_i
382
+
383
+ return array_dict
384
+
385
+
386
+ def distribute_multi_to_dict(prefixes, arrays):
387
+ array_dict = dict()
388
+
389
+ for prefix_ix, prefix in enumerate(prefixes):
390
+ for ix, array_i in enumerate(arrays[prefix_ix]):
391
+ key = prefix + str(ix+1)
392
+ array_dict[key] = array_i
393
+
394
+ return array_dict
395
+
396
+
397
+ def unwrap_rf_parameters(parameters):
398
+ keys = list(parameters.keys())
399
+ a_ixs = np.where([word.startswith('a') for word in keys])[0]
400
+ d_ixs = np.where([word.startswith('d') for word in keys])[0]
401
+ a_nums = np.array([int(string.split('a')[1]) for string in np.array(keys)[a_ixs]])
402
+ d_nums = np.array([int(string.split('d')[1]) for string in np.array(keys)[d_ixs]])
403
+
404
+ a = [np.zeros([3,3])]*(max(a_nums))
405
+ d = [0]*(max(d_nums))
406
+
407
+ for a_num in a_nums:
408
+ a[a_num-1] = np.array(parameters['a%i' %a_num])
409
+
410
+ for d_num in d_nums:
411
+ d[d_num-1] = parameters['d%i' %d_num]
412
+
413
+ d = np.array(d)
414
+ return a,d
415
+
416
+
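# Sketch of the rational-function (RF) AD workflow. The parameter dict layout (keys 'a1',
# 'a2', ..., 'd1', ...) mirrors what unwrap_rf_parameters expects; the numbers below are
# placeholders, not fitted values.
pars = {'a1': np.eye(3)*0.10, 'a2': np.eye(3)*0.05, 'a3': np.eye(3)*0.01,
        'a4': np.eye(3)*0.02, 'd1': 0.5}
a, d = unwrap_rf_parameters(pars)
ad = rf_ads(a, d)              # dict of AD callables, e.g. ad['H1'](v)
print(ad['H1'](2.0))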
417
+ def normal_wind(T_g2wi, T_g2el, U=1.0):
418
+ T_wi2el = T_g2el @ T_g2wi.T
419
+ e_wind_local = (T_wi2el @ np.array([1, 0, 0])[np.newaxis,:].T).flatten()
420
+
421
+ Un = U * np.sqrt(e_wind_local[1]**2+e_wind_local[2]**2)
422
+ return Un
423
+
424
+
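# Example: the mean-wind component normal to an element axis. Here the wind x-axis coincides
# with the global x-axis and the element spans the global y-axis, so the full wind is normal.
T_wind = np.eye(3)                         # global-to-wind transformation
T_el = np.array([[0., 1., 0.],             # global-to-element: local x along global y
                 [-1., 0., 0.],
                 [0., 0., 1.]])
print(normal_wind(T_wind, T_el, U=30.0))   # 30.0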
425
+ def el_mat_generic(Ayy,Ayz,Ayt,Azy,Azz,Azt,Aty,Atz,Att,L):
426
+ mat = np.zeros([12,12])
427
+
428
+ mat[0:6, 0:6] = np.array([
429
+ [0, 0, 0, 0, 0, 0 ],
430
+ [0, 156*Ayy, 156*Ayz, 147*Ayt, -22*L*Ayz, 22*L*Ayy ],
431
+ [0, 156*Azy, 156*Azz, 147*Azt, -22*L*Azz, 22*L*Azy ],
432
+ [0, 147*Aty, 147*Atz, 140*Att, -21*L*Atz, 21*L*Aty ],
433
+ [0, -22*L*Azy, -22*L*Azz, -21*L*Azt, 4*L**2*Azz, -4*L**2*Azy ],
434
+ [0, 22*L*Ayy, 22*L*Ayz, 21*L*Ayt, -4*L**2*Ayz, 4*L**2*Ayy ],
435
+ ])
436
+
437
+ mat[0:6, 6:12] = np.array([
438
+ [0, 0, 0, 0, 0, 0 ],
439
+ [0, 54*Ayy, 54*Ayz, 63*Ayt, 13*L*Ayz, -13*L*Ayy ],
440
+ [0, 54*Azy, 54*Azz, 63*Azt, 13*L*Azz, -13*L*Azy ],
441
+ [0, 63*Aty, 63*Atz, 70*Att, 14*L*Atz, -14*L*Aty ],
442
+ [0, -13*L*Azy, -13*L*Azz, -14*L*Azt, -3*L**2*Azz, 3*L**2*Azy ],
443
+ [0, 13*L*Ayy, 13*L*Ayz, 14*L*Ayt, 3*L**2*Ayz, -3*L**2*Ayy ],
444
+ ])
445
+
446
+ mat[6:12, 0:6] = np.array([
447
+ [0, 0, 0, 0, 0, 0 ],
448
+ [0, 54*Ayy, 54*Ayz, 63*Ayt, -13*L*Ayz, 13*L*Ayy ],
449
+ [0, 54*Azy, 54*Azz, 63*Azt, -13*L*Azz, 13*L*Azy ],
450
+ [0, 63*Aty, 63*Atz, 70*Att, -14*L*Atz, 14*L*Aty ],
451
+ [0, 13*L*Azy, 13*L*Azz, 14*L*Azt, -3*L**2*Azz, 3*L**2*Azy ],
452
+ [0, -13*L*Ayy, -13*L*Ayz, -14*L*Ayt, 3*L**2*Ayz, -3*L**2*Ayy ],
453
+ ])
454
+
455
+ mat[6:12,6:12] = np.array([
456
+ [0, 0, 0, 0, 0, 0 ],
457
+ [0, 156*Ayy, 156*Ayz, 147*Ayt, 22*L*Ayz, -22*L*Ayy ],
458
+ [0, 156*Azy, 156*Azz, 147*Azt, 22*L*Azz, -22*L*Azy ],
459
+ [0, 147*Aty, 147*Atz, 140*Att, 21*L*Atz, -21*L*Aty ],
460
+ [0, 22*L*Azy, 22*L*Azz, 21*L*Azt, 4*L**2*Azz, -4*L**2*Azy ],
461
+ [0, -22*L*Ayy, -22*L*Ayz, -21*L*Ayt, -4*L**2*Ayz, 4*L**2*Ayy ],
462
+ ])
463
+
464
+ return mat
465
+
466
+ def element_aero_mats(B, omega, ad_dict, L, T=None, phi=None, rho=1.225):
467
+ # Called for a selected reduced velocity, specified through the omega value (the mean wind is implicit).
+ # The corresponding values of P, H and A are used for the given mean wind velocity.
469
+
470
+ # Stiffness
471
+ Ayy = 1/2*rho*B**2*omega**2*ad_dict['P4']
472
+ Ayz = 1/2*rho*B**2*omega**2*ad_dict['P6']
473
+ Ayt = -1/2*rho*B**2*omega**2*B*ad_dict['P3']
474
+
475
+ Azy = 1/2*rho*B**2*omega**2*ad_dict['H6']
476
+ Azz = 1/2*rho*B**2*omega**2*ad_dict['H4']
477
+ Azt = -1/2*rho*B**2*omega**2*B*ad_dict['H3']
478
+
479
+ Aty = -1/2*rho*B**2*omega**2*B*ad_dict['A6']
480
+ Atz = -1/2*rho*B**2*omega**2*B*ad_dict['A4']
481
+ Att = 1/2*rho*B**2*omega**2*B**2*ad_dict['A3']
482
+
483
+ k_aero = L/420 * el_mat_generic(Ayy,Ayz,Ayt,Azy,Azz,Azt,Aty,Atz,Att,L)
484
+
485
+
486
+ # Damping
487
+ Ayy = 1/2*rho*B**2*omega*ad_dict['P1']
488
+ Ayz = 1/2*rho*B**2*omega*ad_dict['P5']
489
+ Ayt = -1/2*rho*B**2*omega*B*ad_dict['P2']
490
+
491
+ Azy = 1/2*rho*B**2*omega*ad_dict['H5']
492
+ Azz = 1/2*rho*B**2*omega*ad_dict['H1']
493
+ Azt = -1/2*rho*B**2*omega*B*ad_dict['H2']
494
+
495
+ Aty = -1/2*rho*B**2*omega*B*ad_dict['A5']
496
+ Atz = -1/2*rho*B**2*omega*B*ad_dict['A1']
497
+ Att = 1/2*rho*B**2*omega*B**2*ad_dict['A2']
498
+
499
+ c_aero = L/420 * el_mat_generic(Ayy,Ayz,Ayt,Azy,Azz,Azt,Aty,Atz,Att,L)
500
+
501
+ if not (T is None and phi is None):
502
+ if T is not None:
503
+ if np.shape(T)[0]==6:
504
+ T = np.kron(np.eye(2), T) #two times 6dof matrix, block diagonal
505
+ if np.shape(T)[0]==3:
506
+ T = np.kron(np.eye(4), T) #four times 3dof matrix, block diagonal
507
+ elif np.shape(T)[0]!=12:
508
+ raise ValueError('Wrong size of T (should be 3x3, 6x6 or 12x12)')
509
+ else: # no transformation matrix given; keep the matrices in local element coordinates
510
+ T = np.eye(12)
511
+
512
+ if phi is not None:
513
+ T = T @ phi
514
+
515
+ k_aero = T.T @ k_aero @ T
516
+ c_aero = T.T @ c_aero @ T
517
+
518
+ return k_aero, c_aero
519
+
520
+
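# Example: local 12x12 aerodynamic element matrices for one 20 m element at omega = 0.8 rad/s,
# using flat-plate ADs evaluated at the corresponding reduced velocity. With T and phi omitted,
# the matrices are returned in local element coordinates.
AD = ADs(ad_type='flat plate', **flatplate_ads())
omega = 0.8
v = 40.0/(20.0*omega)                      # reduced velocity for U = 40 m/s, B = 20 m
k_el, c_el = element_aero_mats(B=20.0, omega=omega, ad_dict=AD.evaluate_all(v), L=20.0)
print(k_el.shape, c_el.shape)              # (12, 12) (12, 12)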
521
+ # Spectra
522
+ def kaimal_auto(omega, Lx, A, sigma, V):
523
+ f = omega/(2*np.pi)
524
+ fhat = f*Lx/V
525
+ S = (sigma**2*(A*fhat)/(1+(1.5*A*fhat))**(5/3))/f
526
+
527
+ return S/(2*np.pi)
528
+
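# Example (illustrative parameters): one-point Kaimal auto-spectra for the u, v and w
# components at a single frequency, returned per rad/s.
A = np.array([6.8, 9.4, 9.4])
sigma = np.array([1.5, 1.2, 0.8])     # [m/s]
Lx = np.array([100.0, 30.0, 10.0])    # [m]
Su, Sv, Sw = kaimal_auto(1.0, Lx, A, sigma, V=30.0)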
529
+ def von_Karman_auto(omega, Lx, sigma, V):
530
+
531
+ # spectral constants for the u, v and w components (arrays, so both scalar and array fhat work)
+ A1 = np.array([0.0, 0.0, 755.2])
+
+ A2 = np.array([70.8, 0.0, 283.2])
+
+ rr = np.array([5/6, 11/6, 11/6])
548
+
549
+ f = omega/(2*np.pi)
550
+ fhat = f*Lx/V
551
+ S = (sigma**2*( (4*fhat)*(1+A1*fhat**2) )/ (1+A2*fhat**2)**(rr))/f
552
+
553
+ return S/(2*np.pi)
554
+
555
+ def generic_kaimal_matrix(omega, nodes, T_wind, A, sigma, C, Lx, U, options=None):
556
+ # Adapted from the MATLAB version. `nodes` is a list of beef nodes.
557
+ V = np.zeros(len(nodes)) # Initialize vector with mean wind in all nodes
558
+ Su = np.zeros([len(nodes), len(nodes)]) # One-point spectra for u component in all nodes
559
+ Sv = np.zeros([len(nodes), len(nodes)]) # One-point spectra for v component in all nodes
560
+ Sw = np.zeros([len(nodes), len(nodes)]) # One-point spectra for w component in all nodes
561
+ xyz = np.zeros([len(nodes), 3]) # Nodes in wind coordinate system
562
+
563
+ if options is None:
564
+ options = {
565
+ 'spectra_type': 'Kaimal'
566
+ }
567
+
568
+ for node_ix, node in enumerate(nodes):
569
+ xyz[node_ix,:] = (T_wind @ node.coordinates).T #Transform node coordinates to the wind coordinate system
570
+ V[node_ix] = U(node.coordinates) # Mean wind velocity in the nodes
571
+
572
+ if 'spectra_type' in options:
573
+ if options['spectra_type'] == 'vonKarman':
574
+ Su[node_ix,:], Sv[node_ix,:], Sw[node_ix,:] = von_Karman_auto(omega, Lx, sigma, V[node_ix])
575
+ elif options['spectra_type'] == 'Kaimal':
576
+ Su[node_ix,:], Sv[node_ix,:], Sw[node_ix,:] = kaimal_auto(omega, Lx, A, sigma, V[node_ix]) # One point spectra for u component in all nodes
577
+ else: # use Kaimal (default)
578
+ Su[node_ix,:], Sv[node_ix,:], Sw[node_ix,:] = kaimal_auto(omega, Lx, A, sigma, V[node_ix])
579
+
580
+ x = xyz[:, 0]
581
+ y = xyz[:, 1]
582
+ z = xyz[:, 2]
583
+
584
+ dxdx = x[np.newaxis,:] - x[np.newaxis,:].T # Matrix with all distances between nodes in x direction
585
+ dydy = y[np.newaxis,:] - y[np.newaxis,:].T # Matrix with all distances between nodes in y direction
586
+ dzdz = z[np.newaxis,:] - z[np.newaxis,:].T # Matrix with all distances between nodes in z direction
587
+
588
+ invV = 2/(V[np.newaxis,:]+V[np.newaxis,:].T) # Inverse of the average mean wind velocity for all combinations of nodes
589
+
590
+ Suu = np.sqrt(Su)*np.sqrt(Su).T*np.exp(
591
+ -invV*omega/(2*np.pi)*np.sqrt(
592
+ (C[0,0]*dxdx)**2 + (C[1,0]*dydy)**2 + (C[2,0]*dzdz)**2)
593
+ )
594
+
595
+ Svv = np.sqrt(Sv)*np.sqrt(Sv).T*np.exp(
596
+ -invV*omega/(2*np.pi)*np.sqrt(
597
+ (C[0,1]*dxdx)**2 + (C[1,1]*dydy)**2 + (C[2,1]*dzdz)**2)
598
+ )
599
+
600
+ Sww = np.sqrt(Sw)*np.sqrt(Sw).T*np.exp(
601
+ -invV*omega/(2*np.pi)*np.sqrt(
602
+ (C[0,2]*dxdx)**2 + (C[1,2]*dydy)**2 + (C[2,2]*dzdz)**2)
603
+ )
604
+
605
+ SvSv = np.zeros([3*len(nodes), 3*len(nodes)]) # Cross-spectral density matrix containing all the turbulence components
606
+ SvSv[0::3, 0::3] = Suu
607
+ SvSv[1::3, 1::3] = Svv
608
+ SvSv[2::3, 2::3] = Sww
609
+
610
+ return SvSv
611
+
612
+
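# Sketch: cross-spectral density of the turbulence components (u, v, w) at two points. The real
# workflow passes beef nodes; the tiny stand-in class below is hypothetical and only provides
# the `coordinates` attribute used here. All parameter values are illustrative.
class _Point:
    def __init__(self, coordinates):
        self.coordinates = np.array(coordinates)

nodes = [_Point([0, 0, 50]), _Point([0, 20, 50])]
A = np.array([6.8, 9.4, 9.4])          # Kaimal spectral parameters (u, v, w)
sigma = np.array([1.5, 1.2, 0.8])      # turbulence standard deviations [m/s]
Lx = np.array([100.0, 30.0, 10.0])     # integral length scales [m]
C = np.full((3, 3), 10.0)              # decay coefficients
U_fun = lambda xyz: 30.0               # mean wind speed [m/s]

SvSv = generic_kaimal_matrix(1.0, nodes, np.eye(3), A, sigma, C, Lx, U_fun)
print(SvSv.shape)                      # (6, 6): 3 turbulence components x 2 points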
613
+ def loadmatrix_fe(V, load_coefficients, rho, B, D, Admittance = None):
614
+
615
+ if Admittance is None:
616
+ Admittance = lambda omega_k: np.ones( (4,3) )
617
+
618
+ Cd = load_coefficients['Cd']
619
+ dCd = load_coefficients['dCd']
620
+ Cl = load_coefficients['Cl']
621
+ dCl = load_coefficients['dCl']
622
+ Cm = load_coefficients['Cm']
623
+ dCm = load_coefficients['dCm']
624
+
625
+ # Equation 7 from Oiseth, 2010
626
+ BqBq = lambda omega_k: 1/2*rho*V*B*Admittance(omega_k*B/V/2/np.pi)*np.array([[0, 0, 0],
627
+ [0, 2*D/B*Cd, (D/B*dCd-Cl)],
628
+ [0, 2*Cl, (dCl+D/B*Cd)],
629
+ [0, -2*B*Cm, -B*dCm]])
630
+
631
+ return BqBq
632
+
633
+ def loadmatrix_fe_static(V, load_coefficients, rho, B, D ):
634
+
635
+ Cd = load_coefficients['Cd']
636
+ Cl = load_coefficients['Cl']
637
+ Cm = load_coefficients['Cm']
638
+
639
+ BqBq = 1/2*rho*V**2*B*np.array([[ 0, 0 , 0 ],
640
+ [ D/B*Cd, 0 , 0 ],
641
+ [ 0, 0 , Cl ],
642
+ [ 0, B*Cm , 0 ]])
643
+ return BqBq
644
+
645
+ def loadvector(T_el, Bq, T_wind, L, static = False):
646
+
647
+ G = np.zeros([12,4])
648
+ G[0,0] = L/2
649
+ G[1,1] = L/2
650
+ G[2,2] = L/2
651
+ G[3,3] = L/2
652
+ G[6,0] = L/2
653
+ G[7,1] = L/2
654
+ G[8,2] = L/2
655
+ G[9,3] = L/2
656
+ G[4,2] = -L**2/12
657
+ G[5,1] = L**2/12
658
+ G[10,2] = L**2/12
659
+ G[11,1] = -L**2/12
660
+
661
+ # Transform from wind coordinates to local element coordinates
662
+
663
+ if static is False:
664
+ T = T_el @ T_wind.T
665
+ else:
666
+ T = T_el @ T_wind.T @ np.ones( [3,1] )
667
+
668
+ T_full = blkdiag(T_el, 4) # Block diagonal - repeated 4 times to transform both trans and rot DOFs at each node (2+2)
669
+
670
+ # T_full.T transforms L-->G
671
+ R = T_full.T @ G @ Bq @ T
672
+ R1 = R[0:6] # Element node 1
673
+ R2 = R[6:12] # Element node 2
674
+
675
+
676
+ return R1, R2
677
+
678
+
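# Sketch: buffeting load-coefficient matrix (Eq. 7, Oiseth 2010) and the consistent load
# vectors for one 10 m element. Element and wind axes are aligned with the global axes purely
# to keep the example short; coefficient values are illustrative.
lc = LoadCoefficients(Cd=0.7, dCd=0.0, Cl=0.1, dCl=3.0, Cm=0.02, dCm=1.12).to_dict()
BqBq = loadmatrix_fe(V=40.0, load_coefficients=lc, rho=1.225, B=20.0, D=3.0)
R1, R2 = loadvector(np.eye(3), BqBq(1.0), np.eye(3), 10.0)
print(R1.shape, R2.shape)   # (6, 3) per element end: 6 DOFs x 3 turbulence components (u, v, w)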
679
+ def windaction(omega, S, load_coefficients, elements, T_wind,
680
+ phi, B, D, U, omega_reduced=None, rho=1.225, print_progress=True,
681
+ section_lookup=None, nodes=None, Admittance = None):
682
+
683
+ if nodes is None:
684
+ nodes = list(set([a for b in [el.nodes for el in elements] for a in b]))
685
+
686
+ n_dofs = 6
687
+
688
+ # Ensure that first omega value is not 0 when using logspace omega axis
689
+ if omega_reduced is None:
690
+ if np.min(omega) == 0:
691
+ omega_sorted = np.sort(omega)
692
+ omega_start = omega_sorted[1]
693
+ else:
694
+ omega_start = np.min(omega)
695
+
696
+ omega_reduced = np.logspace(np.log10(omega_start), np.log10(np.max(omega)), num=50) # A log frequency axis that is used to obtain the cross-spectral density matrix
697
+
698
+ genSqSq_reduced = np.zeros([phi.shape[1], phi.shape[1], len(omega_reduced)]) # Initialize the cross-spectral density matrix
699
+
700
+ # Establish RG matrix (common for all freqs)
701
+
702
+
703
+ if section_lookup is None:
704
+ lc_fun = lambda el: load_coefficients
705
+ B_fun = lambda el: B
706
+ D_fun = lambda el: D
707
+ Admittance_fun = lambda el: Admittance
708
+ else:
709
+ def get_sec(el):
710
+ for key in section_lookup:
711
+ if el in section_lookup[key]:
712
+ return key
713
+
714
+ lc_fun = lambda el: load_coefficients[get_sec(el)]
715
+ B_fun = lambda el: B[get_sec(el)]
716
+ D_fun = lambda el: D[get_sec(el)]
717
+
718
+ if Admittance is None: # omit the frequency loop if admittance is not included - faster!
719
+ RG = np.zeros([len(nodes)*n_dofs, 3])
720
+ for el in elements:
721
+ node1_dofs = el.nodes[0].global_dofs
722
+ node2_dofs = el.nodes[1].global_dofs
723
+
724
+ mean_wind = U(el.get_cog())
725
+ Vn = normal_wind(T_wind, el.T0)*mean_wind # Find the normal wind
726
+ BqBq = loadmatrix_fe(Vn, lc_fun(el), rho, B_fun(el), D_fun(el))
727
+ R1, R2 = loadvector(el.T0, BqBq(0), T_wind, el.L) # Obtain the load vector for each element (admittance is all ones here, so BqBq is frequency independent)
728
+
729
+ RG[node1_dofs, :] = RG[node1_dofs, :] + R1 # Add the contribution from the element (end 1) to the system
730
+ RG[node2_dofs, :] = RG[node2_dofs, :] + R2 # Add the contribution from the element (end 2) to the system
731
+
732
+ # Make block matrix
733
+ RG_block = np.zeros([6*len(nodes), 3*len(nodes)])
734
+
735
+ for node in nodes:
736
+ ix = node.index
737
+ n = np.r_[6*ix:6*ix+6]
738
+ m = np.r_[3*ix:3*ix+3]
739
+ RG_block[np.ix_(n,m)] = RG[n,:] #verified with MATLAB version for beam example
740
+
741
+ for k, omega_k in enumerate(omega_reduced):
742
+ if print_progress:
743
+ pp(k+1, len(omega_reduced), sym='=', postfix=' ESTABLISHING WIND EXCITATION')
744
+ print('')
745
+
746
+ phiT_RG_block = phi.T @ RG_block
747
+ genSqSq_reduced[:, :, k] = phiT_RG_block @ S(omega_k) @ phiT_RG_block.T # to modal coordinates
748
+
749
+ else: # admittance is given - triple loop (the old way, slower)
750
+ Admittance_fun = (lambda el: Admittance) if section_lookup is None else (lambda el: Admittance[get_sec(el)])
751
+
752
+ for k, omega_k in enumerate(omega_reduced):
753
+ if print_progress:
754
+ pp(k+1, len(omega_reduced), sym='=', postfix=' ESTABLISHING WIND EXCITATION')
755
+ print('')
756
+
757
+ # Establish RG matrix
758
+ RG = np.zeros([len(nodes)*n_dofs, 3])
759
+
760
+ for el in elements:
761
+ node1_dofs = el.nodes[0].global_dofs
762
+ node2_dofs = el.nodes[1].global_dofs
763
+
764
+ mean_wind = U(el.get_cog())
765
+ Vn = normal_wind(T_wind, el.T0)*mean_wind # Find the normal wind
766
+ BqBq = loadmatrix_fe(Vn, lc_fun(el), rho, B_fun(el), D_fun(el), Admittance = Admittance_fun(el))
767
+ R1, R2 = loadvector(el.T0, BqBq(omega_k), T_wind, el.L) # Obtain the load vector for each element
768
+
769
+ RG[node1_dofs, :] = RG[node1_dofs, :] + R1 # Add the contribution from the element (end 1) to the system
770
+ RG[node2_dofs, :] = RG[node2_dofs, :] + R2 # Add the contribution from the element (end 2) to the system
771
+
772
+
773
+ # Make block matrix
774
+ RG_block = np.zeros([6*len(nodes), 3*len(nodes)])
775
+
776
+ for node in nodes:
777
+ ix = node.index
778
+ n = np.r_[6*ix:6*ix+6]
779
+ m = np.r_[3*ix:3*ix+3]
780
+ RG_block[np.ix_(n,m)] = RG[n,:] #verified with MATLAB version for beam example
781
+
782
+ phiT_RG_block = phi.T @ RG_block
783
+ genSqSq_reduced[:, :, k] = phiT_RG_block @ S(omega_k) @ phiT_RG_block.T # to modal coordinates
784
+
785
+
786
+ # Interpolate results to full frequency axis
787
+ genSqSq = interp1d(omega_reduced, genSqSq_reduced, kind='quadratic', axis=2, fill_value=0, bounds_error=False)
788
+
789
+ return genSqSq
790
+
791
+ def windaction_static(load_coefficients, elements, T_wind,
792
+ phi, B, D, U, rho=1.225, print_progress=True,
793
+ section_lookup=None, nodes=None):
794
+
795
+ if nodes is None:
796
+ nodes = list(set([a for b in [el.nodes for el in elements] for a in b]))
797
+
798
+ n_dofs = 6
799
+
800
+ if section_lookup is None:
801
+ lc_fun = lambda el: load_coefficients
802
+ B_fun = lambda el: B
803
+ D_fun = lambda el: D
804
+ else:
805
+ def get_sec(el):
806
+ for key in section_lookup:
807
+ if el in section_lookup[key]:
808
+ return key
809
+
810
+ lc_fun = lambda el: load_coefficients[get_sec(el)]
811
+ B_fun = lambda el: B[get_sec(el)]
812
+ D_fun = lambda el: D[get_sec(el)]
813
+
814
+ # Establish RG matrix
815
+ RG = np.zeros([len(nodes)*n_dofs])
816
+
817
+ for el in elements:
818
+ node1_dofs = el.nodes[0].global_dofs
819
+ node2_dofs = el.nodes[1].global_dofs
820
+
821
+ mean_wind = U(el.get_cog())
822
+ Vn = normal_wind(T_wind, el.T0)*mean_wind # Find the normal wind
823
+ BqBq = loadmatrix_fe_static(Vn, lc_fun(el), rho, B_fun(el), D_fun(el))
824
+ R1, R2 = loadvector(el.T0, BqBq, T_wind, el.L, static = True) # Obtain the load vector for each element
825
+
826
+ RG[node1_dofs] = RG[node1_dofs] + R1[:,0] # Add the contribution from the element (end 1) to the system
827
+ RG[node2_dofs] = RG[node2_dofs] + R2[:,0] # Add the contribution from the element (end 2) to the system
828
+
829
+ # Make block matrix
830
+ RG_block = np.zeros([6*len(nodes)])
831
+
832
+ for node in nodes:
833
+ ix = node.index
834
+ n = np.r_[6*ix:6*ix+6]
835
+ RG_block[np.ix_(n)] = RG[n] #verified with MATLAB version for beam example
836
+
837
+
838
+ genSqSq = phi.T @ RG_block
839
+
840
+ return genSqSq
841
+
842
+ def K_from_ad(ad, V, w, B, rho):
843
+ if w==0:
844
+ k = np.zeros([3,3])
845
+ else:
846
+ v = V / (B*w) # reduced velocity
847
+
848
+ k = (0.5*rho*B**2*w**2 *
849
+ np.vstack([[ad['P4'](v), ad['P6'](v), -B*ad['P3'](v)],
850
+ [ad['H6'](v), ad['H4'](v), -B*ad['H3'](v)],
851
+ [-B*ad['A6'](v), -B*ad['A4'](v), B**2*ad['A3'](v)]]))
852
+
853
+
854
+ return k
855
+
856
+
857
+ def C_from_ad(ad, V, w, B, rho):
858
+ if w==0:
859
+ c = np.zeros([3,3])
860
+ else:
861
+ v = V / (B*w) # reduced velocity
862
+
863
+ c = (0.5*rho*B**2*w *
864
+ np.vstack([[ad['P1'](v), ad['P5'](v), -B*ad['P2'](v)],
865
+ [ad['H5'](v), ad['H1'](v), -B*ad['H2'](v)],
866
+ [-B*ad['A5'](v), -B*ad['A1'](v), B**2*ad['A2'](v)]]))
867
+
868
+ return c
869
+
870
+
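# Example (illustrative values): 3x3 aerodynamic stiffness and damping for a single cross
# section from quasi-steady ADs, at mean wind V and circular frequency w (DOF order [y, z, rx]).
lc = LoadCoefficients(Cd=0.7, dCd=0.0, Cl=0.1, dCl=3.0, Cm=0.02, dCm=1.12)
ad = quasisteady_ads(D=3.0, B=20.0, load_coefficients=lc)
k3 = K_from_ad(ad, V=40.0, w=1.0, B=20.0, rho=1.225)
c3 = C_from_ad(ad, V=40.0, w=1.0, B=20.0, rho=1.225)
print(k3.shape, c3.shape)   # (3, 3) (3, 3)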
871
+ def phi_aero_sum(mat, phi, x):
872
+ n_modes = phi.shape[1]
873
+ n_points = len(x)
874
+
875
+ mat_int = np.zeros([n_modes, n_modes, n_points])
876
+
877
+ for p in range(n_points):
878
+ phi_point = phi[p*6+1:p*6+4, :]
879
+ mat_int[:, :, p] = phi_point.T @ mat @ phi_point
880
+
881
+ mat = np.trapz(mat_int, x=x, axis=2)
882
+
883
+ return mat
884
+
885
+
886
+ def function_sum(fun, const, fun_factor=1):
887
+ def fsum(x):
888
+ if fun is None:
889
+ return const
890
+ else:
891
+ return fun(x)*fun_factor + const
892
+
893
+ return fsum
894
+
895
+
896
+ def get_aero_cont_adfun(ad_dict_fun, V, B, rho, phi, x):
897
+ def K(w):
898
+ n_modes = phi.shape[1]
899
+ n_points = len(x)
900
+
901
+ mat_int = np.zeros([n_modes, n_modes, n_points])
902
+
903
+ for p in range(n_points):
904
+ phi_point = phi[p*6+1:p*6+4, :]
905
+ kae = K_from_ad(ad_dict_fun(x[p]), V, w, B, rho)
906
+ mat_int[:, :, p] = phi_point.T @ kae @ phi_point
907
+
908
+ return np.trapz(mat_int, x=x, axis=2)
909
+
910
+
911
+ def C(w):
912
+ n_modes = phi.shape[1]
913
+ n_points = len(x)
914
+
915
+ mat_int = np.zeros([n_modes, n_modes, n_points])
916
+
917
+ for p in range(n_points):
918
+ phi_point = phi[p*6+1:p*6+4, :]
919
+ kae = C_from_ad(ad_dict_fun(x[p]), V, w, B, rho)
920
+ mat_int[:, :, p] = phi_point.T @ kae @ phi_point
921
+
922
+ return np.trapz(mat_int, x=x, axis=2)
923
+
924
+
925
+ return K, C
926
+
927
+
928
+ def get_aero_cont_addict(ad_dict, V, B, rho, phi, x):
929
+ def K(w):
930
+ kae = K_from_ad(ad_dict, V, w, B, rho)
931
+ return phi_aero_sum(kae, phi, x)
932
+
933
+ def C(w):
934
+ cae = C_from_ad(ad_dict, V, w, B, rho)
935
+ return phi_aero_sum(cae, phi, x)
936
+
937
+ return K, C
938
+
939
+
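# Sketch: generalized (modal) aerodynamic matrices for a line-like structure described by a
# mode-shape matrix phi (6 DOFs per point along x); only the [y, z, rx] rows at each point
# enter the 3x3 AD matrices. Numbers are illustrative.
x = np.linspace(0, 1000, 101)                # span coordinate [m]
phi = np.zeros([6*len(x), 2])
phi[2::6, 0] = np.sin(np.pi*x/x[-1])         # vertical half-sine mode
phi[3::6, 1] = np.sin(np.pi*x/x[-1])         # torsional half-sine mode

K, C = get_aero_cont_addict(flatplate_ads(), V=40.0, B=20.0, rho=1.225, phi=phi, x=x)
print(K(1.0).shape, C(1.0).shape)            # (2, 2) at omega = 1 rad/s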
940
+ def itflutter_cont(Ms, Cs, Ks, phi, x, ad_dict, B, V=0.0, rho=1.225, dV=1,
941
+ overshoot_factor=0.5, itmax={}, omega_ref=None,
942
+ tol={}, print_progress=True, keep_all=False, track_by_psi=True):
943
+
944
+ if callable(ad_dict):
945
+ get_aero = get_aero_cont_adfun
946
+ else:
947
+ get_aero = get_aero_cont_addict
948
+
949
+ itmax_ = {'V':50, 'f': 15}
950
+ itmax_.update(**itmax)
951
+ itmax = dict(itmax_)
952
+
953
+ tol_ = {'V': 1e-3, 'f': 1e-4}
954
+ tol_.update(**tol)
955
+ tol = tol_
956
+
957
+ res = dict()
958
+ res['V'] = []
959
+ res['lambd'] = []
960
+ res['critical_mode'] = []
961
+ res['critical_psi'] = []
962
+
963
+ converged = False
964
+ psi_prev = None
965
+
966
+ if omega_ref is None:
967
+ A = statespace(Ks, Cs, Ms)
968
+ lambd_ref, psi = np.linalg.eig(A)
969
+ omega_initial = np.sort(np.abs(np.imag(lambd_ref)))[::2]
970
+ omega_ref = omega_initial[0]
971
+
972
+ for it_vel in range(itmax['V']):
973
+ Kae, Cae = get_aero(ad_dict, V, B, rho, phi, x)
974
+ getK = function_sum(Kae, Ks, fun_factor=-1)
975
+ getC = function_sum(Cae, Cs, fun_factor=-1)
976
+ getM = function_sum(None, Ms, fun_factor=-1)
977
+
978
+ lambd, psi, not_converged = iteig(getK, getC, getM, tol=tol['f'],
979
+ keep_full=True, mac_min=0.0, itmax=itmax['f'])
980
+
981
+ if len(not_converged)>0:
982
+ lambd[not_converged] = -np.inf + 0j
983
+ if print_progress:
984
+ if len(not_converged)<10:
985
+ nc_modes = 'index '+ ', '.join([str(i) for i in not_converged])
986
+ else:
987
+ nc_modes = '>10'
988
+ print(f'** Non-converged modes ({nc_modes}) from iterative eigensolution disregarded! **')
989
+
990
+ if it_vel!=0 and track_by_psi:
991
+ ixs, __, __, __ = restructure_as_ref(psi_prev, psi)
992
+
993
+ psi = psi[:, ixs]
994
+ lambd = lambd[ixs]
995
+
996
+ psi_prev = psi*1
997
+
998
+ critical_mode = np.argmax(np.real(lambd))
999
+ real_lambd = np.max(np.real(lambd))
1000
+ critical_omega = np.abs(np.imag(lambd[critical_mode]))
1001
+
1002
+ if keep_all or real_lambd<=0:
1003
+ res['critical_mode'].append(critical_mode)
1004
+ res['lambd'].append(lambd)
1005
+ res['V'].append(V)
1006
+ res['critical_psi'].append(psi[:,critical_mode])
1007
+
1008
+ if dV < tol['V'] and real_lambd<=0:
1009
+ converged = True
1010
+ if print_progress:
1011
+ print(conv_text)
1012
+ print(f'Flutter estimated to occur at V = {V:.2f} m/s ({critical_omega:.2f} rad/s) ==> v = {V/(B*critical_omega):.2f}\n')
1013
+
1014
+ break
1015
+ elif real_lambd<0:
1016
+ if print_progress:
1017
+ print(f'Increasing velocity V = {V:.2f} --> {V+dV:.2f}.')
1018
+ V = V + dV
1019
+ else:
1020
+ if print_progress:
1021
+ print(f'Overshot. Reducing velocity V = {V:.2f} --> {V-dV/2:.2f}. Reducing step size dV = {dV:.2f} --> {dV/2:.2f}')
1022
+
1023
+ dV = overshoot_factor*dV # adjusting the velocity increment, and step backwards
1024
+ V = V - dV
1025
+
1026
+ if not converged and print_progress:
1027
+ print('Not able to converge within specified maximum iterations for specified tolerance criteria.')
1028
+
1029
+ res = {key: np.array(res[key]) for key in ['critical_mode', 'critical_psi', 'V', 'lambd']}
1030
+
1031
+ return res
1032
+
1033
+
1034
+
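# Sketch: bimodal (heave/pitch) flutter search with flat-plate ADs. The cross-section values
# below are illustrative only (roughly long-span-bridge-like) and are not taken from wawi;
# whether and where the iteration converges depends entirely on these assumed properties.
m = 22.7e3             # mass per unit length [kg/m]
I = 2.47e6             # mass moment of inertia per unit length [kgm^2/m]
B = 31.0               # deck width [m]
w_h = 2*np.pi*0.100    # still-air heave frequency [rad/s]
w_a = 2*np.pi*0.278    # still-air pitch frequency [rad/s]

Ms = np.diag([m, I])
Ks = np.diag([m*w_h**2, I*w_a**2])
Cs = np.diag([2*0.003*m*w_h, 2*0.003*I*w_a])   # 0.3 % of critical damping

x = np.linspace(0, 1, 2)        # unit span => section model per unit length
phi = np.zeros([6*len(x), 2])
phi[2::6, 0] = 1.0              # mode 1: pure heave (z)
phi[3::6, 1] = 1.0              # mode 2: pure pitch (rx)

res = itflutter_cont(Ms, Cs, Ks, phi, x, flatplate_ads(), B,
                     V=40.0, dV=2.0, print_progress=True)
print(res['V'])                 # velocities accepted by the iteration (empty if none were stable)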
1035
+ def itflutter_cont_naive(Ms, Cs, Ks, phi, x, ad_dict, B, V=0.0, rho=1.225, dV=1,
1036
+ overshoot_factor=0.5, itmax={}, tol={}, print_progress=True):
1037
+
1038
+
1039
+ if callable(ad_dict):
1040
+ get_aero = get_aero_cont_adfun
1041
+ else:
1042
+ get_aero = get_aero_cont_addict
1043
+
1044
+ itmax_ = {'V':50, 'f': 15}
1045
+ itmax_.update(**itmax)
1046
+ itmax = itmax_
1047
+
1048
+ tol_ = {'V': 1e-3, 'f': 1e-4}
1049
+ tol_.update(**tol)
1050
+ tol = tol_
1051
+
1052
+ res = dict()
1053
+ res['V'] = []
1054
+ res['lambd'] = []
1055
+ res['critical_mode'] = []
1056
+ res['critical_psi'] = []
1057
+
1058
+ converged = False
1059
+
1060
+ for it_vel in range(itmax['V']):
1061
+ Kae, Cae = get_aero(ad_dict, V, B, rho, phi, x)
1062
+ getK = function_sum(Kae, Ks, fun_factor=-1)
1063
+ getC = function_sum(Cae, Cs, fun_factor=-1)
1064
+ getM = function_sum(None, Ms, fun_factor=-1)
1065
+
1066
+ lambd, psi = iteig_naive(getK, getC, getM, tol=tol['f'], itmax=itmax['f'])
1067
+
1068
+ complex_ix = np.imag(lambd) != 0
1069
+
1070
+ critical_mode = np.argmax(np.real(lambd[complex_ix]))
1071
+ critical_mode = np.where(complex_ix)[0][critical_mode]
1072
+
1073
+ real_lambd = np.max(np.real(lambd))
1074
+ critical_omega = np.abs(np.imag(lambd[critical_mode]))
1075
+
1076
+ if real_lambd<=0:
1077
+ res['critical_mode'].append(critical_mode)
1078
+ res['lambd'].append(lambd)
1079
+ res['V'].append(V)
1080
+ res['critical_psi'].append(psi[:,critical_mode])
1081
+
1082
+ if dV < tol['V'] and real_lambd<=0:
1083
+
1084
+ converged = True
1085
+ if print_progress:
1086
+
1087
+ print(conv_text)
1088
+ print(f'Flutter estimated to occur at V = {V:.2f} m/s ({critical_omega:.2f} rad/s) ==> v = {V/(B*critical_omega):.2f}\n')
1089
+
1090
+ break
1091
+ elif real_lambd<=0:
1092
+ if print_progress:
1093
+ print(f'Increasing velocity V = {V:.2f} --> {V+dV:.2f}.')
1094
+ V = V + dV
1095
+ else:
1096
+ if print_progress:
1097
+ print(f'Overshot. Reducing velocity V = {V:.2f} --> {V-dV/2:.2f}. Reducing step size dV = {dV:.2f} --> {dV/2:.2f}')
1098
+
1099
+ dV = overshoot_factor*dV # adjusting the velocity increment, and step backwards
1100
+ V = V - dV
1101
+
1102
+ if not converged and print_progress:
1103
+ print('Not able to converge within specified maximum iterations for specified tolerance criteria.')
1104
+
1105
+ res = {key: np.array(res[key]) for key in ['critical_mode', 'critical_psi', 'V', 'lambd']}
1106
+
1107
+ return res
1108
+