unocg 0.0.3.tar.gz → 0.0.4.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of unocg might be problematic.
- {unocg-0.0.3/unocg.egg-info → unocg-0.0.4}/PKG-INFO +1 -1
- {unocg-0.0.3 → unocg-0.0.4}/pyproject.toml +1 -1
- unocg-0.0.4/unocg/training/losses/__init__.py +1 -0
- unocg-0.0.4/unocg/training/losses/base.py +102 -0
- unocg-0.0.4/unocg/training/losses/mechanical.py +246 -0
- unocg-0.0.4/unocg/training/losses/thermal.py +247 -0
- unocg-0.0.4/unocg/utils/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4/unocg.egg-info}/PKG-INFO +1 -1
- {unocg-0.0.3 → unocg-0.0.4}/unocg.egg-info/SOURCES.txt +5 -0
- {unocg-0.0.3 → unocg-0.0.4}/LICENSE +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/README.md +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/setup.cfg +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/setup.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/base.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/config.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/materials/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/materials/base.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/materials/mechanical.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/materials/thermal.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/modules/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/modules/operators.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/modules/preconditioners.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/modules/solvers.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/preconditioners/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/preconditioners/base.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/preconditioners/torch.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/problems/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/problems/base.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/problems/mechanical.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/problems/thermal.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/solvers/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/solvers/base.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/solvers/torch.py +0 -0
- {unocg-0.0.3/unocg/utils → unocg-0.0.4/unocg/training}/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/transforms/__init__.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/transforms/base.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/transforms/fourier.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/utils/data.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/utils/evaluation.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg/utils/plotting.py +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg.egg-info/dependency_links.txt +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg.egg-info/requires.txt +0 -0
- {unocg-0.0.3 → unocg-0.0.4}/unocg.egg-info/top_level.txt +0 -0
unocg-0.0.4/unocg/training/losses/__init__.py

@@ -0,0 +1 @@
+from .base import Loss, WeightedLoss
unocg-0.0.4/unocg/training/losses/base.py

@@ -0,0 +1,102 @@
+"""
+Definition of loss functions that allow for physics-informed training of machine learning models
+"""
+from abc import ABC
+from typing import Any, Union, Iterable, List, Optional
+
+# third-party packages
+import torch
+
+
+class Loss(ABC):
+    """
+    Abstract loss function
+    """
+
+    def __init__(self, n_dim: int = 2, reduction: str = "sum"):
+        """
+
+        :param n_dim: dimension of the problem
+        :param reduction: ('mean'|'sum'|'none'), defaults to 'sum'
+        :type reduction: str, optional
+        """
+        super().__init__()
+        self.n_dim = n_dim
+        self.ch_dim = -(1 + self.n_dim)
+        self.reduction = reduction
+
+    def reduce(self, loss):
+        """
+        Perform a reduction step over all datasets to transform a loss function into a cost function.
+
+        A loss function is evaluated element-wise for a dataset.
+        However, a cost function should return a single value for the dataset.
+        Typically, `mean` reduction is used.
+
+        :param loss: Tensor that contains the element-wise loss for a dataset
+        :type loss: :class:`torch.Tensor`
+        :return: Reduced loss
+        :rtype: :class:`torch.Tensor`
+        """
+        if self.reduction == "mean":
+            return torch.nanmean(loss)
+        elif self.reduction == "sum":
+            return torch.nansum(loss)
+        else:
+            return loss
+
+    def unsqueeze(self, output, target):
+        """
+        Ensure that the tensors :code:`output` and :code:`target` have a shape of the form :code:`(N, features)`.
+
+        When a loss function is called with a single data point, the tensor shape is :code:`(features)` and hence does not fit.
+        This method expands the dimensions if needed.
+
+        :param output: Model output
+        :type output: :class:`torch.Tensor`
+        :param target: Target data
+        :type target: :class:`torch.Tensor`
+        :return: Tuple (output, target)
+        :rtype: tuple
+        """
+        while output.ndim < self.n_dim + 2:
+            output = torch.unsqueeze(output, 0)
+        while target.ndim < output.ndim:
+            target = torch.unsqueeze(target, 0)
+        while output.ndim < target.ndim:
+            output = torch.unsqueeze(output, 0)
+        assert output.ndim >= self.n_dim + 2 and output.ndim == target.ndim
+        return output, target
+        # return output.flatten(end_dim=-(self.n_dim + 2)), target.flatten(end_dim=-(self.n_dim + 2))
+
+    @property
+    def dims(self):
+        return self.n_dim * (slice(None),)
+
+    @property
+    def dims_list(self):
+        return tuple(range(-1, -(1 + self.n_dim), -1))
+
+    @property
+    def expand_dims(self):
+        return self.n_dim * (None,)
+
+
+class WeightedLoss(Loss):
+    """
+    Weighted loss function that represents a linear combination of several loss functions
+    """
+
+    def __init__(self, losses: Iterable[Any], weights: Iterable[Union[float, int]], reduction="mean"):
+        super().__init__(reduction=reduction)
+        self.losses = losses
+        self.weights = weights
+
+    def __call__(self, output, target):
+        total_loss = 0.0
+        for loss, weight in zip(self.losses, self.weights):
+            total_loss += weight * loss(output, target)
+        return total_loss
+
+    def __str__(self):
+        return "Weighted(...)"
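A minimal usage sketch (not part of the package) of how a concrete loss can build on the abstract base class above: an element-wise RMSE over the spatial dimensions, reduced to a scalar via Loss.reduce. The (batch, channels, height, width) tensor layout for n_dim=2 is an assumption inferred from the slicing in the modules below.

import torch
from unocg.training.losses import Loss

class RMSELoss(Loss):
    def __call__(self, output, target):
        output, target = self.unsqueeze(output, target)  # promote to (N, C, H, W)
        # mean of squared error over the spatial dims, one value per (sample, channel)
        per_sample = torch.sqrt(((output - target) ** 2).nanmean(self.dims_list))
        return self.reduce(per_sample)                   # 'mean' | 'sum' | 'none'

loss_fn = RMSELoss(n_dim=2, reduction="mean")
print(loss_fn(torch.randn(4, 1, 16, 16), torch.randn(4, 1, 16, 16)))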
unocg-0.0.4/unocg/training/losses/mechanical.py

@@ -0,0 +1,246 @@
+import torch
+from .base import Loss
+
+
+class DispLoss(Loss):
+    """
+    Displacement loss function
+    """
+
+    def __init__(self, n_dim=2, reduction="sum"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+
+    def abs(self, output, target):
+        """
+        Compute absolute (RMSE) error for the displacement field
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+        loss = torch.sqrt(torch.nn.MSELoss(reduction="none")(target_disp, output_disp).nanmean(self.dims_list))
+        loss = self.reduce(loss).squeeze(-1)
+        return loss
+
+    def rel(self, output, target):
+        """
+        Compute relative error for the displacement field
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+        loss = torch.linalg.norm(target_disp - output_disp, dim=-(1 + self.n_dim)).nanmean(self.dims_list) \
+            / torch.linalg.norm(target_disp, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+        loss = self.reduce(loss).squeeze(-1)
+        return loss
+
+    def __call__(self, x, y):
+        return self.rel(x, y)
+
+    def __str__(self):
+        return f"Disp({self.reduction})"
+
+
+class StrainLoss(Loss):
+    """
+    Strain loss function
+    """
+
+    def __init__(self, grad_module, n_dim=2, reduction="sum"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+        self.grad_module = grad_module
+
+    def abs(self, output, target):
+        """
+        Compute absolute error for the displacement field
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+        loss = torch.linalg.norm(target_disp - output_disp, dim=self.ch_dim)
+        loss = self.reduce(loss)
+        return loss
+
+    def rel(self, output, target):
+        """
+        Compute error in the displacement gradient (strain)
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+        batch_dims = output_disp.shape[:self.ch_dim]
+        output_disp = torch.flatten(output_disp, start_dim=0, end_dim=self.ch_dim - 1)
+        target_disp = torch.flatten(target_disp, start_dim=0, end_dim=self.ch_dim - 1)
+        output_grad = self.grad_module(output_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        target_grad = self.grad_module(target_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        output_disp = torch.unflatten(output_disp, dim=0, sizes=batch_dims)
+        target_disp = torch.unflatten(target_disp, dim=0, sizes=batch_dims)
+        loss = torch.linalg.norm(target_grad - output_grad, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+        loss = self.reduce(loss)
+        return loss
+
+    def __call__(self, x, y):
+        return self.rel(x, y)
+
+    def __str__(self):
+        return f"Strain({self.reduction})"
+
+
+class StressLoss(Loss):
+    """
+    Stress loss function
+    """
+
+    def __init__(self, n_dim=2, reduction="sum"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+
+    def abs(self, output, target):
+        """
+        Compute absolute error in the stress norm
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+        loss = torch.linalg.norm(target_stress - output_stress, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+        loss = self.reduce(loss)
+        return loss
+
+    def rel(self, output, target):
+        """
+        Compute relative error in the stress norm
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+        loss = torch.linalg.norm(target_stress - output_stress, dim=-(1 + self.n_dim)).nanmean(self.dims_list) \
+            / torch.linalg.norm(target_stress, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+        loss = self.reduce(loss)
+        return loss
+
+    def __call__(self, output: torch.Tensor, target: torch.Tensor):
+        return self.rel(output, target)
+
+    def __str__(self):
+        return f"Stress({self.reduction})"
+
+
+class MechanicalEnergyLoss(Loss):
+    """
+    Mechanical energy loss function
+    """
+
+    def __init__(self, grad_module, n_dim=2, reduction="sum"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+        self.grad_module = grad_module
+
+    def abs(self, output, target):
+        """
+        Compute absolute error in the energy norm
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+        output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+        batch_dims = output_disp.shape[:self.ch_dim]
+        output_disp = torch.flatten(output_disp, start_dim=0, end_dim=self.ch_dim - 1)
+        target_disp = torch.flatten(target_disp, start_dim=0, end_dim=self.ch_dim - 1)
+        output_grad = self.grad_module(output_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        target_grad = self.grad_module(target_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        output_disp = torch.unflatten(output_disp, dim=0, sizes=batch_dims)
+        target_disp = torch.unflatten(target_disp, dim=0, sizes=batch_dims)
+        loss = torch.sqrt(
+            torch.linalg.norm((output_grad - target_grad) * (output_stress - target_stress), dim=self.ch_dim).nanmean(self.dims_list)
+        )
+        loss = self.reduce(loss)
+        return loss
+
+    def rel(self, output, target):
+        """
+        Compute relative error in the energy norm
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+        output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+        batch_dims = output_disp.shape[:self.ch_dim]
+        output_disp = torch.flatten(output_disp, start_dim=0, end_dim=self.ch_dim - 1)
+        target_disp = torch.flatten(target_disp, start_dim=0, end_dim=self.ch_dim - 1)
+        output_grad = self.grad_module(output_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        target_grad = self.grad_module(target_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        output_disp = torch.unflatten(output_disp, dim=0, sizes=batch_dims)
+        target_disp = torch.unflatten(target_disp, dim=0, sizes=batch_dims)
+        loss = torch.sqrt(
+            torch.linalg.norm((output_grad - target_grad) * (output_stress - target_stress), dim=self.ch_dim).nanmean(self.dims_list)
+            / torch.linalg.norm(target_grad * target_stress, dim=self.ch_dim).nanmean(self.dims_list)
+        )
+        loss = self.reduce(loss)
+        return loss
+
+    def __call__(self, output_disp: torch.Tensor, target_disp: torch.Tensor, param_field=None):
+        return self.abs(output_disp, target_disp)
+
+    def __str__(self):
+        return f"MechEnergy({self.reduction})"
+
+
+class LinearElasticResidualLoss(Loss):
+    """
+    Linear elastic residual loss function
+    """
+
+    def __init__(self, div_module, n_dim=2, reduction="sum", residual_mode="mean"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+        self.div_module = div_module
+        self.residual_mode = residual_mode
+
+    def abs(self, output, target=None):
+        """
+        Compute the divergence residual of the stress field
+
+        :param output: Model output
+        :param target: Target data (only its shape is used)
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target if target is not None else output)
+        output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+        batch_dims = target_stress.shape[:self.ch_dim]
+        output_stress = torch.flatten(output_stress, start_dim=0, end_dim=self.ch_dim - 1).unsqueeze(self.ch_dim - 1)
+        output_residual = self.div_module(output_stress)
+        output_residual = torch.unflatten(output_residual, dim=0, sizes=batch_dims)
+        if self.residual_mode == "sum":
+            loss = torch.abs(output_residual).nansum(self.dims_list)
+        elif self.residual_mode == "mean":
+            loss = torch.abs(output_residual).nanmean(self.dims_list)
+        else:
+            raise ValueError("Unknown residual_mode")
+        loss = self.reduce(loss).squeeze(-1)
+        return loss
+
+    def __call__(self, output: torch.Tensor, target):
+        return self.abs(output, target)
+
+    def __str__(self):
+        return f"LinearElasticResidual({self.reduction})"
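A hedged usage sketch (not part of the diff): combining the mechanical losses above through WeightedLoss from base.py. The channel layout (n_dim displacement channels first, stress channels after) is inferred from the slicing above; the concrete channel counts and grid size in this snippet are illustrative assumptions.

import torch
from unocg.training.losses import WeightedLoss
from unocg.training.losses.mechanical import DispLoss, StressLoss

n_dim = 2
output = torch.randn(8, n_dim + 3, 32, 32)  # assumed: 2 displacement + 3 stress channels on a 32x32 grid
target = torch.randn(8, n_dim + 3, 32, 32)

# 1.0 * relative displacement error + 0.1 * relative stress error
loss_fn = WeightedLoss(
    losses=[DispLoss(n_dim=n_dim, reduction="mean"), StressLoss(n_dim=n_dim, reduction="mean")],
    weights=[1.0, 0.1],
)
print(loss_fn(output, target))  # scalar tensor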
unocg-0.0.4/unocg/training/losses/thermal.py

@@ -0,0 +1,247 @@
+import torch
+from .base import Loss
+
+
+class TempLoss(Loss):
+    """
+    Temperature loss function
+    """
+
+    def __init__(self, n_dim=2, reduction="sum"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+
+    def abs(self, output, target):
+        """
+        Compute absolute (RMSE) error for the temperature field
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+        loss = torch.sqrt(torch.nn.MSELoss(reduction="none")(target_temp, output_temp).nanmean(self.dims_list))
+        loss = self.reduce(loss).squeeze(-1)
+        return loss
+
+    def rel(self, output, target):
+        """
+        Compute relative error for the temperature field
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+        loss = torch.sqrt(torch.nn.MSELoss(reduction="none")(target_temp, output_temp).nanmean(self.dims_list)
+                          / torch.nn.MSELoss(reduction="none")(target_temp, torch.zeros_like(target_temp)).nanmean(self.dims_list))
+        loss = self.reduce(loss).squeeze(-1)
+        return loss
+
+    def __call__(self, x, y):
+        return self.rel(x, y)
+
+    def __str__(self):
+        return f"Temp({self.reduction})"
+
+
+class GradLoss(Loss):
+    """
+    Temperature gradient loss function
+    """
+
+    def __init__(self, grad_module, n_dim=2, reduction="sum"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+        self.grad_module = grad_module
+
+    def abs(self, output, target):
+        """
+        Compute absolute error for the temperature field
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+        loss = torch.linalg.norm(target_temp - output_temp, dim=self.ch_dim)
+        loss = self.reduce(loss)
+        return loss
+
+    def rel(self, output, target):
+        """
+        Compute error in the temperature gradient
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+        # output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+        batch_dims = output_temp.shape[:self.ch_dim]
+        output_temp = torch.flatten(output_temp, start_dim=0, end_dim=self.ch_dim - 1)
+        target_temp = torch.flatten(target_temp, start_dim=0, end_dim=self.ch_dim - 1)
+        output_grad = self.grad_module(output_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        target_grad = self.grad_module(target_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        output_temp = torch.unflatten(output_temp, dim=0, sizes=batch_dims)
+        target_temp = torch.unflatten(target_temp, dim=0, sizes=batch_dims)
+        loss = torch.linalg.norm(target_grad - output_grad, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+        loss = self.reduce(loss)
+        return loss
+
+    def __call__(self, x, y):
+        return self.rel(x, y)
+
+    def __str__(self):
+        return f"Grad({self.reduction})"
+
+
+class FluxLoss(Loss):
+    """
+    Flux loss function
+    """
+
+    def __init__(self, n_dim=2, reduction="sum"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+
+    def abs(self, output, target):
+        """
+        Compute absolute error in the flux norm
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+        loss = torch.linalg.norm(target_flux - output_flux, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+        loss = self.reduce(loss)
+        return loss
+
+    def rel(self, output, target):
+        """
+        Compute relative error in the flux norm
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+        loss = torch.linalg.norm(target_flux - output_flux, dim=-(1 + self.n_dim)).nanmean(self.dims_list) \
+            / torch.linalg.norm(target_flux, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+        loss = self.reduce(loss)
+        return loss
+
+    def __call__(self, output: torch.Tensor, target: torch.Tensor):
+        return self.rel(output, target)
+
+    def __str__(self):
+        return f"Flux({self.reduction})"
+
+
+class ThermalEnergyLoss(Loss):
+    """
+    Thermal energy loss function
+    """
+
+    def __init__(self, grad_module, n_dim=2, reduction="sum"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+        self.grad_module = grad_module
+
+    def abs(self, output, target):
+        """
+        Compute absolute error in the energy norm
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+        output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+        batch_dims = output_temp.shape[:self.ch_dim]
+        output_temp = torch.flatten(output_temp, start_dim=0, end_dim=self.ch_dim - 1)
+        target_temp = torch.flatten(target_temp, start_dim=0, end_dim=self.ch_dim - 1)
+        output_grad = self.grad_module(output_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        target_grad = self.grad_module(target_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        output_temp = torch.unflatten(output_temp, dim=0, sizes=batch_dims)
+        target_temp = torch.unflatten(target_temp, dim=0, sizes=batch_dims)
+        loss = torch.sqrt(
+            torch.linalg.norm((output_grad - target_grad) * (output_flux - target_flux), dim=self.ch_dim).nanmean(self.dims_list)
+        )
+        loss = self.reduce(loss)
+        return loss
+
+    def rel(self, output, target):
+        """
+        Compute relative error in the energy norm
+
+        :param output: Model output
+        :param target: Target data
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target)
+        output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+        output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+        batch_dims = output_temp.shape[:self.ch_dim]
+        output_temp = torch.flatten(output_temp, start_dim=0, end_dim=self.ch_dim - 1)
+        target_temp = torch.flatten(target_temp, start_dim=0, end_dim=self.ch_dim - 1)
+        output_grad = self.grad_module(output_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        target_grad = self.grad_module(target_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+        output_temp = torch.unflatten(output_temp, dim=0, sizes=batch_dims)
+        target_temp = torch.unflatten(target_temp, dim=0, sizes=batch_dims)
+        loss = torch.sqrt(
+            torch.linalg.norm((output_grad - target_grad) * (output_flux - target_flux), dim=self.ch_dim).nanmean(self.dims_list)
+            / torch.linalg.norm(target_grad * target_flux, dim=self.ch_dim).nanmean(self.dims_list)
+        )
+        loss = self.reduce(loss)
+        return loss
+
+    def __call__(self, output: torch.Tensor, target: torch.Tensor, param_field=None):
+        return self.abs(output, target)
+
+    def __str__(self):
+        return f"ThermEnergy({self.reduction})"
+
+
+class HeatCondResidualLoss(Loss):
+    """
+    Heat conduction residual loss function
+    """
+
+    def __init__(self, div_module, n_dim=2, reduction="sum", residual_mode="mean"):
+        super().__init__(n_dim=n_dim, reduction=reduction)
+        self.div_module = div_module
+        self.residual_mode = residual_mode
+
+    def abs(self, output, target=None):
+        """
+        Compute the divergence residual of the flux field
+
+        :param output: Model output
+        :param target: Target data (only its shape is used)
+        :return: Reduced loss
+        """
+        output, target = self.unsqueeze(output, target if target is not None else output)
+        output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+        batch_dims = target_flux.shape[:self.ch_dim]
+        output_flux = torch.flatten(output_flux, start_dim=0, end_dim=self.ch_dim - 1).unsqueeze(self.ch_dim - 1)
+        output_residual = self.div_module(output_flux)
+        output_residual = torch.unflatten(output_residual, dim=0, sizes=batch_dims)
+        if self.residual_mode == "sum":
+            loss = torch.abs(output_residual).nansum(self.dims_list)
+        elif self.residual_mode == "mean":
+            loss = torch.abs(output_residual).nanmean(self.dims_list)
+        else:
+            raise ValueError("Unknown residual_mode")
+        loss = self.reduce(loss).squeeze(-1)
+        return loss
+
+    def __call__(self, output: torch.Tensor, target):
+        return self.abs(output, target)
+
+    def __str__(self):
+        return f"HeatCondResidual({self.reduction})"
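Similarly, a minimal sketch of evaluating the thermal losses above. That channel 0 holds the temperature and channels 1..n_dim hold the flux is an assumption read off the slicing in the module, not documented API.

import torch
from unocg.training.losses.thermal import TempLoss, FluxLoss

output = torch.randn(8, 3, 32, 32)  # assumed layout: 1 temperature + 2 flux channels on a 2-D grid
target = torch.randn(8, 3, 32, 32)

temp_err = TempLoss(n_dim=2, reduction="mean")(output, target)  # relative temperature error
flux_err = FluxLoss(n_dim=2, reduction="mean")(output, target)  # relative flux error
print(temp_err, flux_err)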
{unocg-0.0.3 → unocg-0.0.4}/unocg.egg-info/SOURCES.txt

@@ -28,6 +28,11 @@ unocg/problems/thermal.py
 unocg/solvers/__init__.py
 unocg/solvers/base.py
 unocg/solvers/torch.py
+unocg/training/__init__.py
+unocg/training/losses/__init__.py
+unocg/training/losses/base.py
+unocg/training/losses/mechanical.py
+unocg/training/losses/thermal.py
 unocg/transforms/__init__.py
 unocg/transforms/base.py
 unocg/transforms/fourier.py