pyMOTO 1.4.0__py3-none-any.whl → 1.5.1__py3-none-any.whl
This diff shows the content of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those published versions.
- {pyMOTO-1.4.0.dist-info → pyMOTO-1.5.1.dist-info}/METADATA +2 -2
- pyMOTO-1.5.1.dist-info/RECORD +29 -0
- {pyMOTO-1.4.0.dist-info → pyMOTO-1.5.1.dist-info}/WHEEL +1 -1
- pymoto/__init__.py +5 -5
- pymoto/common/domain.py +3 -1
- pymoto/common/dyadcarrier.py +56 -27
- pymoto/common/mma.py +70 -79
- pymoto/core_objects.py +103 -47
- pymoto/modules/assembly.py +146 -28
- pymoto/modules/io.py +80 -6
- pymoto/modules/linalg.py +84 -23
- pymoto/modules/scaling.py +2 -1
- pymoto/routines.py +13 -2
- pymoto/solvers/__init__.py +2 -2
- pymoto/solvers/auto_determine.py +1 -1
- pymoto/solvers/iterative.py +20 -16
- pymoto/solvers/matrix_checks.py +7 -3
- pymoto/solvers/solvers.py +44 -13
- pymoto/solvers/sparse.py +11 -0
- pyMOTO-1.4.0.dist-info/RECORD +0 -29
- {pyMOTO-1.4.0.dist-info → pyMOTO-1.5.1.dist-info}/LICENSE +0 -0
- {pyMOTO-1.4.0.dist-info → pyMOTO-1.5.1.dist-info}/top_level.txt +0 -0
- {pyMOTO-1.4.0.dist-info → pyMOTO-1.5.1.dist-info}/zip-safe +0 -0
pymoto/core_objects.py
CHANGED
@@ -107,6 +107,9 @@ class Signal:
             return
         if self.sensitivity is None:
             self.sensitivity = copy.deepcopy(ds)
+        elif hasattr(self.sensitivity, "add_sensitivity"):
+            # Allow user to implement a custom add_sensitivity function instead of __iadd__
+            self.sensitivity.add_sensitivity(ds)
         else:
             self.sensitivity += ds
         return self
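The new branch in `Signal.add_sensitivity` lets the stored sensitivity object provide its own accumulation hook instead of relying on `__iadd__`. A minimal sketch of such a container; the `TermCollector` class and its fields are hypothetical, only the `add_sensitivity` method name is what the dispatch above checks for:

    import numpy as np

    class TermCollector:
        """Hypothetical sensitivity container with a custom accumulation hook."""
        def __init__(self, n):
            self.n = n
            self.terms = []  # store individual contributions instead of summing dense arrays

        def add_sensitivity(self, ds):
            # Called by Signal.add_sensitivity() in place of `self.sensitivity += ds`
            self.terms.append(np.asarray(ds))
            return self

        def todense(self):
            return np.sum(self.terms, axis=0) if self.terms else np.zeros(self.n)

If a signal's `sensitivity` attribute holds such an object, repeated calls to `add_sensitivity` now append terms rather than requiring in-place addition.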
@@ -183,14 +186,14 @@ class SignalSlice(Signal):
     The sliced values are referenced to their original source Signal, such that they can be used and updated in modules.
     This means that updating the values in this SignalSlice changes the data in its source Signal.
     """
-    def __init__(self,
-        self.
+    def __init__(self, base, sl, tag=None):
+        self.base = base
         self.slice = sl
         self.keep_alloc = False  # Allocation must be False because sensitivity cannot be assigned with [] operator

         # for s in slice:
         if tag is None:
-            self.tag = f"{self.
+            self.tag = f"{self.base.tag}[{fmt_slice(self.slice)}]"
         else:
             self.tag = tag

@@ -200,7 +203,7 @@ class SignalSlice(Signal):
     @property
     def state(self):
         try:
-            return None if self.
+            return None if self.base.state is None else self.base.state[self.slice]
         except Exception as e:
             # Possibilities: Unslicable object (TypeError) or Wrong dimensions or out of range (IndexError)
             raise type(e)(str(e) + "\n\t| Above error was raised in SignalSlice.state (getter). Signal details:" +
@@ -209,7 +212,7 @@ class SignalSlice(Signal):
     @state.setter
     def state(self, new_state):
         try:
-            self.
+            self.base.state[self.slice] = new_state
         except Exception as e:
             # Possibilities: Unslicable object (TypeError) or Wrong dimensions or out of range (IndexError)
             raise type(e)(str(e) + "\n\t| Above error was raised in SignalSlice.state (setter). Signal details:" +
@@ -218,7 +221,7 @@ class SignalSlice(Signal):
     @property
     def sensitivity(self):
         try:
-            return None if self.
+            return None if self.base.sensitivity is None else self.base.sensitivity[self.slice]
         except Exception as e:
             # Possibilities: Unslicable object (TypeError) or Wrong dimensions or out of range (IndexError)
             raise type(e)(str(e) + "\n\t| Above error was raised in SignalSlice.sensitivity (getter). Signal details:" +
@@ -227,26 +230,55 @@ class SignalSlice(Signal):
     @sensitivity.setter
     def sensitivity(self, new_sens):
         try:
-            if self.
+            if self.base.sensitivity is None:
+                # Initialize sensitivity of base-signal
                 if new_sens is None:
                     return  # Sensitivity doesn't need to be initialized when it is set to None
                 try:
-                    self.
+                    self.base.sensitivity = self.base.state * 0  # Make a new copy with 0 values
                 except TypeError:
-                    if self.
+                    if self.base.state is None:
                         raise TypeError("Could not initialize sensitivity because state is not set" + self._err_str())
                     else:
-                        raise TypeError(f"Could not initialize sensitivity for type \'{type(self.
+                        raise TypeError(f"Could not initialize sensitivity for type \'{type(self.base.state).__name__}\'")

             if new_sens is None:
                 new_sens = 0  # reset() uses this

-            self.
+            self.base.sensitivity[self.slice] = new_sens
         except Exception as e:
             # Possibilities: Unslicable object (TypeError) or Wrong dimensions or out of range (IndexError)
             raise type(e)(str(e) + "\n\t| Above error was raised in SignalSlice.state (setter). Signal details:" +
                           self._err_str()).with_traceback(sys.exc_info()[2])

+    def add_sensitivity(self, ds: Any):
+        """ Add a new term to internal sensitivity """
+        try:
+            if ds is None:
+                return
+            if self.base.sensitivity is None:
+                self.base.sensitivity = self.base.state * 0
+                # self.sensitivity = copy.deepcopy(ds)
+
+            if hasattr(self.sensitivity, "add_sensitivity"):
+                # Allow user to implement a custom add_sensitivity function instead of __iadd__
+                self.sensitivity.add_sensitivity(ds)
+            else:
+                self.sensitivity += ds
+            return self
+        except TypeError:
+            if isinstance(ds, type(self.sensitivity)):
+                raise TypeError(
+                    f"Cannot add to the sensitivity with type '{type(self.sensitivity).__name__}'" + self._err_str())
+            else:
+                raise TypeError(
+                    f"Adding wrong type '{type(ds).__name__}' to the sensitivity '{type(self.sensitivity).__name__}'" + self._err_str())
+        except ValueError:
+            sens_shape = self.sensitivity.shape if hasattr(self.sensitivity, 'shape') else ()
+            ds_shape = ds.shape if hasattr(ds, 'shape') else ()
+            raise ValueError(
+                f"Cannot add argument of shape {ds_shape} to the sensitivity of shape {sens_shape}" + self._err_str()) from None
+
     def reset(self, keep_alloc: bool = None):
         """ Reset the sensitivities to zero or None
         This must be called to clear internal memory of subsequent sensitivity calculations.
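The new `SignalSlice.add_sensitivity` accumulates into the parent signal's storage. A short sketch of the intended behaviour, assuming a `Signal` exposes `state`/`sensitivity` attributes and that indexing a signal yields a `SignalSlice` (which is what this class exists for); the numbers are illustrative only:

    import numpy as np
    from pymoto import Signal

    s = Signal("x")
    s.state = np.arange(5.0)       # base signal with 5 values
    s_part = s[1:3]                # SignalSlice referencing the base signal

    # Accumulating on the slice first zero-initializes the base sensitivity,
    # then adds only into the sliced entries.
    s_part.add_sensitivity(np.array([1.0, 2.0]))
    print(s.sensitivity)           # expected: [0. 1. 2. 0. 0.]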
@@ -405,7 +437,7 @@ class Module(ABC, RegisteredClass):
     >> Module([inputs])

     Using keywords:
-    >> Module(sig_in=[inputs], sig_out=[outputs]
+    >> Module(sig_in=[inputs], sig_out=[outputs])
     """

     def _err_str(self, module_signature: bool = True, init: bool = True, fn=None):
@@ -553,62 +585,74 @@ class Module(ABC, RegisteredClass):
 class Network(Module):
     """ Binds multiple Modules together as one Module

+    Initialize a network with a number of modules that should be executed consecutively
     >> Network(module1, module2, ...)

     >> Network([module1, module2, ...])

     >> Network((module1, module2, ...))

+    Modules can also be constructed using a dictionary based on strings
     >> Network([ {type="module1", sig_in=[sig1, sig2], sig_out=[sig3]},
                  {type="module2", sig_in=[sig3], sig_out=[sig4]} ])

+    Appending modules to a network will output the signals automatically
+    >> fn = Network()
+    >> s_out = fn.append(module1)
+
+    Args:
+        print_timing: Print timing of each module inside this Network
     """
     def __init__(self, *args, print_timing=False):
+        super().__init__()
         self._init_loc = get_init_str()
-
-        #
-        self.mods = _parse_to_list(*args)
-
-        # Check if the blocks are initialized, else create them
-        for i, b in enumerate(self.mods):
-            if isinstance(b, dict):
-                exclude_keys = ['type']
-                b_ex = {k: b[k] for k in set(list(b.keys())) - set(exclude_keys)}
-                self.mods[i] = Module.create(b['type'], **b_ex)
-
-        # Check validity of modules
-        for m in self.mods:
-            if not _is_valid_module(m):
-                raise TypeError(f"Argument is not a valid Module, type=\'{type(mod).__name__}\'.")
-
-        # Gather all the input and output signals of the internal blocks
-        all_in = set()
-        all_out = set()
-        [all_in.update(b.sig_in) for b in self.mods]
-        [all_out.update(b.sig_out) for b in self.mods]
-        in_unique = all_in - all_out
-
-        # Initialize the parent module, with correct inputs and outputs
-        super().__init__(list(in_unique), list(all_out))
-
+        self.mods = []  # Empty module list
+        self.append(*args)  # Append to module list
         self.print_timing = print_timing

-    def timefn(self, fn):
+    def timefn(self, fn, name=None):
         start_t = time.time()
         fn()
-
+        duration = time.time() - start_t
+        if name is None:
+            name = f"{fn}"
+        if isinstance(self.print_timing, bool):
+            tmin = 0.0
+        else:
+            tmin = self.print_timing
+        if duration > tmin:
+            print(f"{name} took {time.time() - start_t} s")

     def response(self):
-        if self.print_timing:
-
+        if self.print_timing is not False:
+            start_t = time.time()
+            [self.timefn(m.response, name=f"-- Response of \"{type(m).__name__}\"") for m in self.mods]
+            duration = time.time() - start_t
+            if isinstance(self.print_timing, bool):
+                tmin = 0.0
+            else:
+                tmin = self.print_timing
+            if duration > tmin:
+                print(f"-- TOTAL Response took {time.time() - start_t} s")
         else:
-            [
+            [m.response() for m in self.mods]

     def sensitivity(self):
-
+        if self.print_timing is not False:
+            start_t = time.time()
+            [self.timefn(m.sensitivity, name=f"-- Sensitivity of \"{type(m).__name__}\"") for m in reversed(self.mods)]
+            duration = time.time() - start_t
+            if isinstance(self.print_timing, bool):
+                tmin = 0.0
+            else:
+                tmin = self.print_timing
+            if duration > tmin:
+                print(f"-- TOTAL Sensitivity took {time.time() - start_t} s")
+        else:
+            [m.sensitivity() for m in reversed(self.mods)]

     def reset(self):
-        [
+        [m.reset() for m in reversed(self.mods)]

     def _response(self, *args):
         pass  # Unused
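In the timed branches above, `print_timing` is interpreted either as a boolean switch or as a minimum duration in seconds below which a timing line is suppressed. The selection logic shared by `timefn`, `response` and `sensitivity` boils down to the following (the helper name `_timing_threshold` is made up for illustration):

    def _timing_threshold(print_timing):
        # True reports every duration (threshold 0.0); a float is treated as the
        # minimum duration in seconds before a line is printed. False is handled
        # by the caller, which skips timing entirely.
        return 0.0 if isinstance(print_timing, bool) else print_timing

    assert _timing_threshold(True) == 0.0
    assert _timing_threshold(0.05) == 0.05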
@@ -628,13 +672,25 @@ class Network(Module):
     def __iter__(self):
         return iter(self.mods)

+    def __call__(self, *args):
+        return self.append(*args)
+
     def append(self, *newmods):
         modlist = _parse_to_list(*newmods)
+        if len(modlist) == 0:
+            return

         # Check if the blocks are initialized, else create them
+        for i, m in enumerate(modlist):
+            if isinstance(m, dict):
+                exclude_keys = ['type']
+                b_ex = {k: m[k] for k in set(list(m.keys())) - set(exclude_keys)}
+                modlist[i] = Module.create(m['type'], **b_ex)
+
+        # Check validity of modules
         for i, m in enumerate(modlist):
             if not _is_valid_module(m):
-                raise TypeError(f"Argument #{i} is not a valid module, type=\'{type(
+                raise TypeError(f"Argument #{i} is not a valid module, type=\'{type(m).__name__}\'.")

         # Obtain the internal blocks
         self.mods.extend(modlist)
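Taken together with the constructor changes above, a network can now be built incrementally and timed per module. An end-to-end sketch, assuming the `Module(sig_in, sig_out)` construction pattern from the docstring earlier in this file and the `Signal` attributes shown above; the `Scale` module is made up for illustration and is not part of pyMOTO:

    import numpy as np
    from pymoto import Module, Network, Signal

    class Scale(Module):
        """Toy module computing y = 2*x (hypothetical, for illustration only)."""
        def _response(self, x):
            return 2 * x

        def _sensitivity(self, dy):
            return 2 * dy

    x = Signal("x")
    x.state = np.array([1.0, 2.0])

    # Only print timings of modules (and totals) slower than 10 ms;
    # print_timing=True reports everything, the default False stays silent.
    fn = Network(print_timing=0.01)

    # append() returns the output signal(s) of the last module added,
    # and fn(...) is shorthand for fn.append(...).
    s1 = fn.append(Scale(x, Signal("y1")))
    s2 = fn(Scale(s1, Signal("y2")))

    fn.response()
    print(s2.state)           # expected: [4. 8.]

    s2.sensitivity = np.ones(2)
    fn.sensitivity()          # reverse sweep through the modules
    print(x.sensitivity)      # expected: [4. 4.]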
@@ -649,4 +705,4 @@ class Network(Module):
         self.sig_in = _parse_to_list(in_unique)
         self.sig_out = _parse_to_list(all_out)

-        return modlist[-1].sig_out[0] if len(modlist[-1].sig_out) == 1 else modlist[-1].sig_out
+        return modlist[-1].sig_out[0] if len(modlist[-1].sig_out) == 1 else modlist[-1].sig_out
pymoto/modules/assembly.py
CHANGED
@@ -57,20 +57,21 @@ class AssembleGeneral(Module):
         self.bc = bc
         self.bcdiagval = np.max(element_matrix) if bcdiagval is None else bcdiagval
         if bc is not None:
-
-
-
-            self.
-            self.cols = np.concatenate((self.cols[self.bcselect], self.bc))
+            bc_inds = np.bitwise_or(np.isin(self.rows, self.bc), np.isin(self.cols, self.bc))
+            self.bcselect = np.argwhere(np.bitwise_not(bc_inds)).flatten()
+            self.bcrows = np.concatenate((self.rows[self.bcselect], self.bc))
+            self.bccols = np.concatenate((self.cols[self.bcselect], self.bc))
         else:
             self.bcselect = None
+            self.bcrows = self.rows
+            self.bccols = self.cols

         self.add_constant = add_constant

     def _response(self, xscale: np.ndarray):
         nel = self.dofconn.shape[0]
         assert xscale.size == nel, f"Input vector wrong size ({xscale.size}), must be of size #nel ({nel})"
-        scaled_el = (
+        scaled_el = (self.elmat.flatten() * xscale[..., np.newaxis]).flatten()

         # Set boundary conditions
         if self.bc is not None:
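The boundary-condition handling above works purely on the COO triplets: any entry whose row or column hits a prescribed dof is dropped, and the prescribed dofs are appended once so that only a diagonal entry (later filled with `bcdiagval`) remains. The filtering can be reproduced in isolation with plain NumPy:

    import numpy as np

    rows = np.array([0, 0, 1, 1, 2, 2])
    cols = np.array([0, 1, 0, 1, 1, 2])
    bc = np.array([1])  # prescribed (fixed) dof

    # Mark triplets that touch a prescribed dof, keep the rest
    bc_inds = np.bitwise_or(np.isin(rows, bc), np.isin(cols, bc))
    bcselect = np.argwhere(np.bitwise_not(bc_inds)).flatten()

    # Surviving triplets plus one diagonal entry per prescribed dof
    bcrows = np.concatenate((rows[bcselect], bc))
    bccols = np.concatenate((cols[bcselect], bc))
    print(bcrows, bccols)  # [0 2 1] [0 2 1]: row/column 1 reduced to its diagonal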
@@ -80,7 +81,7 @@ class AssembleGeneral(Module):
             mat_values = scaled_el

         try:
-            mat = self.matrix_type((mat_values, (self.
+            mat = self.matrix_type((mat_values, (self.bcrows, self.bccols)), shape=(self.n, self.n))
         except TypeError as e:
             raise type(e)(str(e) + "\n\tInvalid matrix_type={}. Either scipy.sparse.cscmatrix or "
                                    "scipy.sparse.csrmatrix are supported"
@@ -96,14 +97,16 @@ class AssembleGeneral(Module):
         if self.bc is not None:
             dgdmat[self.bc, :] = 0.0
             dgdmat[:, self.bc] = 0.0
+        dx = np.zeros_like(self.sig_in[0].state)
         if isinstance(dgdmat, np.ndarray):
-            dx = np.zeros_like(self.sig_in[0].state)
             for i in range(len(dx)):
                 indu, indv = np.meshgrid(self.dofconn[i], self.dofconn[i], indexing='ij')
-
-
+                dxi = einsum("ij,ij->", self.elmat, dgdmat[indu, indv])
+                dx[i] = np.real(dxi) if np.isrealobj(dx) else dxi
         elif isinstance(dgdmat, DyadCarrier):
-
+            dxi = dgdmat.contract(self.elmat, self.dofconn, self.dofconn)
+            dx[:] = np.real(dxi) if np.isrealobj(dx) else dxi
+        return dx


 def get_B(dN_dx, voigt=True):
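The rewritten element loop in `_sensitivity` uses `einsum("ij,ij->", ...)`, which is simply the Frobenius inner product of the element matrix with the matching block of `dgdmat`; a quick check of that identity:

    import numpy as np

    Ke = np.arange(4.0).reshape(2, 2)   # stand-in element matrix
    dG = np.full((2, 2), 0.5)           # stand-in sensitivity block
    assert np.einsum("ij,ij->", Ke, dG) == np.sum(Ke * dG)  # same contraction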
@@ -123,7 +126,7 @@ def get_B(dN_dx, voigt=True):
     """
     n_dim, n_shapefn = dN_dx.shape
     n_strains = int((n_dim * (n_dim+1))/2)  # Triangular number: ndim=3 -> nstrains = 3+2+1
-    B = np.zeros((n_strains, n_shapefn*n_dim))
+    B = np.zeros((n_strains, n_shapefn*n_dim), dtype=dN_dx.dtype)
     if n_dim == 1:
         for i in range(n_shapefn):
             B[i, 0] = dN_dx[i, 0]
@@ -222,7 +225,8 @@ class AssembleStiffness(AssembleGeneral):
         ndof = nnode*domain.dim

         # Element stiffness matrix
-
+        dtype = np.result_type(D, domain.element_size.dtype)
+        self.stiffness_element = np.zeros((ndof, ndof), dtype=dtype)

         # Numerical integration
         siz = domain.element_size
@@ -325,29 +329,47 @@ class ElementOperation(Module):

     :math:`y_e = \mathbf{B} \mathbf{u}_e`

+    This module is the reverse of :py:class:`pymoto.NodalOperation`.
+
     Input Signal:
-      - ``u``: Nodal vector of size ``(
+      - ``u``: Nodal vector of size ``(#dofs_per_node * #nodes)``

     Output Signal:
-      - ``y``: Elemental output data of size ``(..., #elements)``
+      - ``y``: Elemental output data of size ``(..., #elements)`` or ``(#dofs, ..., #elements)``

     Args:
         domain: The domain defining element and nodal connectivity
-        element_matrix: The element operator matrix :math:`\mathbf{B}` of size ``(...,
+        element_matrix: The element operator matrix :math:`\mathbf{B}` of size ``(..., #dofs_per_element)`` or ``(..., #nodes_per_element)``
     """
     def _prepare(self, domain: DomainDefinition, element_matrix: np.ndarray):
         if element_matrix.shape[-1] % domain.elemnodes != 0:
-            raise IndexError(
-
-
-
-
+            raise IndexError(
+                f"Size of last dimension of element operator matrix ({element_matrix.shape[-1]}) is not compatible "
+                f"with mesh. Must be dividable by the number of nodes per element ({domain.elemnodes})."
+            )
+        self.domain = domain
         self.element_matrix = element_matrix
-        self.dofconn =
-        self.usiz = ndof * domain.nnodes
+        self.dofconn = None

     def _response(self, u):
-
+        if u.size % self.domain.nnodes != 0:
+            raise IndexError(f"Size of input vector ({u.size}) does not match number of nodes ({self.domain.nnodes})")
+        ndof = u.size // self.domain.nnodes
+
+        if self.element_matrix.shape[-1] != self.domain.elemnodes * ndof:
+            # Initialize only after first call to response(), because the number of dofs may not yet be known
+            em = self.element_matrix.copy()
+            assert em.shape[-1] == self.domain.elemnodes, f"Size of element matrix must match #dofs_per_element ({ndof*self.domain.elemnodes}) or #nodes_per_element ({self.domain.elemnodes})."
+
+            # Element matrix is repeated for each dof
+            self.element_matrix = np.zeros((ndof, *self.element_matrix.shape[:-1], ndof * self.domain.elemnodes))
+            for i in range(ndof):
+                self.element_matrix[i, ..., i::ndof] = em
+
+        if self.dofconn is None:
+            self.dofconn = self.domain.get_dofconnectivity(ndof)
+
+        assert self.element_matrix.shape[-1] == ndof * self.domain.elemnodes
         return einsum('...k, lk -> ...l', self.element_matrix, u[self.dofconn], optimize=True)

     def _sensitivity(self, dy):
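The lazy setup in `_response` turns a per-node operator into a per-dof operator by interleaving one copy of the matrix per dof with the strided assignment `[i, ..., i::ndof]`. The pattern, reduced to a 1-D per-node operator:

    import numpy as np

    elemnodes, ndof = 4, 2
    em = np.arange(1.0, 5.0)                  # per-node operator, shape (elemnodes,)

    expanded = np.zeros((ndof, ndof * elemnodes))
    for i in range(ndof):
        expanded[i, i::ndof] = em             # dof i occupies every ndof-th column

    print(expanded)
    # [[1. 0. 2. 0. 3. 0. 4. 0.]
    #  [0. 1. 0. 2. 0. 3. 0. 4.]]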
@@ -372,10 +394,10 @@ class Strain(ElementOperation):
     in case ``voigt = True``, for which :math:`\gamma_{xy}=2\epsilon_{xy}`.

     Input Signal:
-      - ``u``: Nodal vector of size ``(#
+      - ``u``: Nodal vector of size ``(#dofs_per_node * #nodes)``

     Output Signal:
-      - ``e``: Strain matrix of size ``(#
+      - ``e``: Strain matrix of size ``(#strains_per_element, #elements)``

     Args:
         domain: The domain defining element and nodal connectivity
@@ -408,10 +430,10 @@ class Stress(Strain):
     """ Calculate the average stresses per element

     Input Signal:
-      - ``u``: Nodal vector of size ``(#
+      - ``u``: Nodal vector of size ``(#dofs_per_node * #nodes)``

     Output Signal:
-      - ``s``: Stress matrix of size ``(#
+      - ``s``: Stress matrix of size ``(#stresses_per_element, #elements)``

     Args:
         domain: The domain defining element and nodal connectivity
@@ -431,3 +453,99 @@ class Stress(Strain):
         if domain.dim == 2:
             D *= domain.element_size[2]
         self.element_matrix = D @ self.element_matrix
+
+
+class ElementAverage(ElementOperation):
+    r""" Determine average value in element of input nodal values
+
+    Input Signal:
+      - ``v``: Nodal vector of size ``(#dofs_per_node * #nodes)``
+
+    Output Signal:
+      - ``v_el``: Elemental vector of size ``(#elements)`` or ``(#dofs, #elements)`` if ``#dofs_per_node>1``
+
+    Args:
+        domain: The domain defining element and nodal connectivity
+    """
+    def _prepare(self, domain: DomainDefinition):
+        shapefuns = domain.eval_shape_fun(pos=np.array([0, 0, 0]))
+        super()._prepare(domain, shapefuns)
+
+
+class NodalOperation(Module):
+    r""" Generic module for nodal operations based on elemental information
+
+    :math:`u_e = \mathbf{A} x_e`
+
+    This module is the reverse of :py:class:`pymoto.ElementOperation`.
+
+    Input Signal:
+      - ``x``: Elemental vector of size ``(#elements)``
+
+    Output Signal:
+      - ``u``: nodal output data of size ``(..., #dofs_per_node * #nodes)``
+
+    Args:
+        domain: The domain defining element and nodal connectivity
+        element_matrix: The element operator matrix :math:`\mathbf{A}` of size ``(..., #dofs_per_element)``
+    """
+    def _prepare(self, domain: DomainDefinition, element_matrix: np.ndarray):
+        if element_matrix.shape[-1] % domain.elemnodes != 0:
+            raise IndexError("Size of last dimension of element operator matrix is not compatible with mesh. "
+                             "Must be dividable by the number of nodes.")
+
+        ndof = element_matrix.shape[-1] // domain.elemnodes
+
+        self.element_matrix = element_matrix
+        self.dofconn = domain.get_dofconnectivity(ndof)
+        self.ndofs = ndof*domain.nnodes
+
+    def _response(self, x):
+        dofs_el = einsum('...k, ...l -> lk', self.element_matrix, x, optimize=True)
+        dofs = np.zeros(self.ndofs)
+        np.add.at(dofs, self.dofconn, dofs_el)
+        return dofs
+
+    def _sensitivity(self, dx):
+        return einsum('...k, lk -> ...l', self.element_matrix, dx[self.dofconn], optimize=True)
+
+
+class ThermoMechanical(NodalOperation):
+    r""" Determine equivalent thermo-mechanical load from design vector and elemental temperature difference
+
+    :math:`f_thermal = \mathbf{A} (x*t_delta)_e`
+
+    Input Signal:
+      - ``x*t_delta``: Elemental vector of size ``(#elements)`` containing elemental densities multiplied by
+        elemental temperature difference
+
+    Output Signal:
+      - ``f_thermal``: nodal equivalent thermo-mechanical load of size ``(#dofs_per_node * #nodes)``
+
+    Args:
+        domain: The domain defining element and nodal connectivity
+        e_modulus (optional): Young's modulus
+        poisson_ratio (optional): Poisson ratio
+        alpha (optional): Coefficient of thermal expansion
+        plane (optional): Plane 'strain' or 'stress'
+    """
+    def _prepare(self, domain: DomainDefinition, e_modulus: float = 1.0, poisson_ratio: float = 0.3, alpha: float = 1e-6, plane: str = 'strain'):
+        dim = domain.dim
+        D = get_D(e_modulus, poisson_ratio, '3d' if dim == 3 else plane.lower())
+        if dim == 2:
+            Phi = np.array([1, 1, 0])
+            D *= domain.element_size[2]
+        elif dim == 3:
+            Phi = np.array([1, 1, 1, 0, 0, 0])
+
+        # Numerical integration
+        BDPhi = np.zeros(domain.elemnodes * dim)
+        siz = domain.element_size
+        w = np.prod(siz[:domain.dim] / 2)
+        for n in domain.node_numbering:
+            pos = n * (siz / 2) / np.sqrt(3)  # Sampling point
+            dN_dx = domain.eval_shape_fun_der(pos)
+            B = get_B(dN_dx)
+            BDPhi += w * B.T @ D @ Phi  # Add contribution
+
+        super()._prepare(domain, alpha*BDPhi)
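A hedged wiring sketch for the new modules. It assumes the usual pyMOTO pattern of constructing a module with input/output `Signal`s plus keyword arguments forwarded to `_prepare`, a 2-D `DomainDefinition(nelx, nely)` constructor, and the `domain.nnodes`/`domain.nel` counts used elsewhere in the package; sizes and signal names are placeholders:

    import numpy as np
    from pymoto import Signal, DomainDefinition
    from pymoto.modules.assembly import ElementAverage, ThermoMechanical

    domain = DomainDefinition(10, 10)       # small 2-D mesh

    # Average nodal temperatures into one value per element
    s_T = Signal("T")
    s_T.state = np.random.rand(domain.nnodes)
    m_avg = ElementAverage(s_T, Signal("T_el"), domain=domain)
    m_avg.response()

    # Equivalent thermo-mechanical load from (density * temperature difference) per element
    s_xdT = Signal("x*dT")
    s_xdT.state = np.random.rand(domain.nel)
    m_load = ThermoMechanical(s_xdT, Signal("f_thermal"), domain=domain, alpha=1e-5)
    m_load.response()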
pymoto/modules/io.py
CHANGED
@@ -202,15 +202,16 @@ class PlotIter(FigModule):
         show (bool): Show the figure on the screen
         ylim: Provide y-axis limits for the plot
     """
-    def _prepare(self, ylim=None):
+    def _prepare(self, ylim=None, log_scale=False):
         self.minlim = 1e+200
         self.maxlim = -1e+200
         self.ylim = ylim
+        self.log_scale = log_scale

     def _response(self, *args):
         if not hasattr(self, 'ax'):
             self.ax = self.fig.add_subplot(111)
-            self.ax.set_yscale('linear')
+            self.ax.set_yscale('linear' if not self.log_scale else 'log')
             self.ax.set_xlabel("Iteration")

         if not hasattr(self, 'line'):
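A hedged usage sketch of the new `log_scale` keyword; `s_obj` stands in for any scalar Signal tracked over the iterations, and the module is assumed to be constructed with a list of input signals plus keyword arguments, as elsewhere in pyMOTO:

    from pymoto import Signal
    from pymoto.modules.io import PlotIter

    s_obj = Signal("objective")
    s_obj.state = 1.0

    # Plot the tracked value on a logarithmic y-axis; the axis padding is then
    # computed in log10-space instead of linearly (see the next hunk).
    plotter = PlotIter([s_obj], log_scale=True)
    plotter.response()  # call once per design iteration to append the current value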
@@ -233,13 +234,24 @@
         self.minlim = min(self.minlim, np.min(xadd))
         self.maxlim = max(self.maxlim, np.max(xadd))

-        dy = max((self.maxlim - self.minlim)*0.05, sys.float_info.min)
-
         self.ax.set_xlim([-0.5, self.iter+0.5])
         if self.ylim is not None:
             self.ax.set_ylim(self.ylim)
         elif np.isfinite(self.minlim) and np.isfinite(self.maxlim):
-
+            if self.log_scale:
+                dy = (np.log10(self.maxlim) - np.log10(self.minlim))*0.05
+                ll = 10**(np.log10(self.minlim) - dy)
+                ul = 10**(np.log10(self.maxlim) + dy)
+            else:
+                dy = (self.maxlim - self.minlim)*0.05
+                ll = self.minlim - dy
+                ul = self.maxlim + dy
+
+            if ll == ul:
+                dy = abs(np.nextafter(abs(ll), 1) - abs(ll))
+                ll = ll - 1e5*dy
+                ul = ul + 1e5*dy
+            self.ax.set_ylim([ll, ul])

         self._update_fig()

@@ -255,7 +267,7 @@ class WriteToVTI(Module):
     accepted, which get the suffixed as ``_00``.

     Input Signals:
-      - ``*args`` (`numpy.
+      - ``*args`` (`numpy.ndarray`): Vectors to write to VTI. The signal tags are used as name.

     Args:
         domain: The domain layout
@@ -282,3 +294,65 @@ class WriteToVTI(Module):
             filen = pth[0] + '.{0:04d}'.format(self.iter) + pth[1]
         self.domain.write_to_vti(data, filename=filen, scale=self.scale)
         self.iter += 1
+
+
+class ScalarToFile(Module):
+    """ Writes iteration data to a log file
+
+    This function can also handle small vectors of scalars, i.e. eigenfrequencies or multiple constraints.
+
+    Input Signals:
+      - ``*args`` (`Numeric` or `np.ndarray`): Values to write to file. The signal tags are used as name.
+
+    Args:
+        saveto: Location to save the log file, supports .txt or .csv
+        fmt (optional): Value format (e.g. 'e', 'f', '.3e', '.5g', '.3f')
+        separator (optional): Value separator, .csv files will automatically use a comma
+    """
+    def _prepare(self, saveto: str, fmt: str = '.10e', separator: str = '\t'):
+        self.saveto = saveto
+        Path(saveto).parent.mkdir(parents=True, exist_ok=True)
+        self.iter = 0
+
+        # Test the format
+        3.14.__format__(fmt)
+        self.format = fmt
+
+        self.separator = "," if ".csv" in self.saveto else separator
+
+    def _response(self, *args):
+        tags = [] if self.iter == 0 else None
+
+        # Add iteration as first column
+        dat = [self.iter.__format__('d')]
+        if tags is not None:
+            tags.append('Iteration')
+
+        # Add all signals
+        for s in self.sig_in:
+            if np.size(np.asarray(s.state)) > 1:
+                it = np.nditer(s.state, flags=['multi_index'])
+                while not it.finished:
+                    dat.append(it.value.__format__(self.format))
+                    if tags is not None:
+                        tags.append(f"{s.tag}{list(it.multi_index)}")
+                    it.iternext()
+            else:
+                dat.append(s.state.__format__(self.format))
+                if tags is not None:
+                    tags.append(s.tag)
+
+        # Write to file
+        if tags is not None:
+            assert len(tags) == len(dat)
+            with open(self.saveto, "w+") as f:
+                # Write header line
+                f.write(self.separator.join(tags))
+                f.write("\n")
+
+        with open(self.saveto, "a+") as f:
+            # Write data
+            f.write(self.separator.join(dat))
+            f.write("\n")
+
+        self.iter += 1