unocg 0.0.2.tar.gz → 0.0.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of unocg might be problematic.

Files changed (44)
  1. {unocg-0.0.2/unocg.egg-info → unocg-0.0.4}/PKG-INFO +2 -2
  2. {unocg-0.0.2 → unocg-0.0.4}/pyproject.toml +6 -6
  3. unocg-0.0.4/unocg/training/__init__.py +0 -0
  4. unocg-0.0.4/unocg/training/losses/__init__.py +1 -0
  5. unocg-0.0.4/unocg/training/losses/base.py +102 -0
  6. unocg-0.0.4/unocg/training/losses/mechanical.py +246 -0
  7. unocg-0.0.4/unocg/training/losses/thermal.py +247 -0
  8. unocg-0.0.4/unocg/utils/__init__.py +0 -0
  9. unocg-0.0.4/unocg/utils/data.py +95 -0
  10. unocg-0.0.4/unocg/utils/evaluation.py +48 -0
  11. unocg-0.0.4/unocg/utils/plotting.py +339 -0
  12. {unocg-0.0.2 → unocg-0.0.4/unocg.egg-info}/PKG-INFO +2 -2
  13. {unocg-0.0.2 → unocg-0.0.4}/unocg.egg-info/SOURCES.txt +10 -1
  14. {unocg-0.0.2 → unocg-0.0.4}/LICENSE +0 -0
  15. {unocg-0.0.2 → unocg-0.0.4}/README.md +0 -0
  16. {unocg-0.0.2 → unocg-0.0.4}/setup.cfg +0 -0
  17. {unocg-0.0.2 → unocg-0.0.4}/setup.py +0 -0
  18. {unocg-0.0.2 → unocg-0.0.4}/unocg/__init__.py +0 -0
  19. {unocg-0.0.2 → unocg-0.0.4}/unocg/base.py +0 -0
  20. {unocg-0.0.2 → unocg-0.0.4}/unocg/config.py +0 -0
  21. {unocg-0.0.2 → unocg-0.0.4}/unocg/materials/__init__.py +0 -0
  22. {unocg-0.0.2 → unocg-0.0.4}/unocg/materials/base.py +0 -0
  23. {unocg-0.0.2 → unocg-0.0.4}/unocg/materials/mechanical.py +0 -0
  24. {unocg-0.0.2 → unocg-0.0.4}/unocg/materials/thermal.py +0 -0
  25. {unocg-0.0.2 → unocg-0.0.4}/unocg/modules/__init__.py +0 -0
  26. {unocg-0.0.2 → unocg-0.0.4}/unocg/modules/operators.py +0 -0
  27. {unocg-0.0.2 → unocg-0.0.4}/unocg/modules/preconditioners.py +0 -0
  28. {unocg-0.0.2 → unocg-0.0.4}/unocg/modules/solvers.py +0 -0
  29. {unocg-0.0.2 → unocg-0.0.4}/unocg/preconditioners/__init__.py +0 -0
  30. {unocg-0.0.2 → unocg-0.0.4}/unocg/preconditioners/base.py +0 -0
  31. {unocg-0.0.2 → unocg-0.0.4}/unocg/preconditioners/torch.py +0 -0
  32. {unocg-0.0.2 → unocg-0.0.4}/unocg/problems/__init__.py +0 -0
  33. {unocg-0.0.2 → unocg-0.0.4}/unocg/problems/base.py +0 -0
  34. {unocg-0.0.2 → unocg-0.0.4}/unocg/problems/mechanical.py +0 -0
  35. {unocg-0.0.2 → unocg-0.0.4}/unocg/problems/thermal.py +0 -0
  36. {unocg-0.0.2 → unocg-0.0.4}/unocg/solvers/__init__.py +0 -0
  37. {unocg-0.0.2 → unocg-0.0.4}/unocg/solvers/base.py +0 -0
  38. {unocg-0.0.2 → unocg-0.0.4}/unocg/solvers/torch.py +0 -0
  39. {unocg-0.0.2 → unocg-0.0.4}/unocg/transforms/__init__.py +0 -0
  40. {unocg-0.0.2 → unocg-0.0.4}/unocg/transforms/base.py +0 -0
  41. {unocg-0.0.2 → unocg-0.0.4}/unocg/transforms/fourier.py +0 -0
  42. {unocg-0.0.2 → unocg-0.0.4}/unocg.egg-info/dependency_links.txt +0 -0
  43. {unocg-0.0.2 → unocg-0.0.4}/unocg.egg-info/requires.txt +1 -1
  44. {unocg-0.0.2 → unocg-0.0.4}/unocg.egg-info/top_level.txt +0 -0
{unocg-0.0.2/unocg.egg-info → unocg-0.0.4}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: unocg
- Version: 0.0.2
+ Version: 0.0.4
  Summary: UNO-CG
  Author-email: Julius Herb <herb@mib.uni-stuttgart.de>, Felix Fritzen <fritzen@mib.uni-stuttgart.de>
  License-Expression: MIT
@@ -20,9 +20,9 @@ Requires-Dist: torch==2.10.0
  Requires-Dist: tqdm~=4.67.1
  Requires-Dist: torchvision~=0.25.0
  Requires-Dist: pyvista[jupyter]~=0.46.5
+ Requires-Dist: jupyterlab~=4.5.3
  Provides-Extra: dev
  Requires-Dist: jupytext~=1.16.4; extra == "dev"
- Requires-Dist: jupyterlab~=4.5.3; extra == "dev"
  Requires-Dist: pre_commit~=3.8.0; extra == "dev"
  Requires-Dist: sphinx~=7.4.7; extra == "dev"
  Requires-Dist: sphinx-rtd-theme~=2.0.0; extra == "dev"
{unocg-0.0.2 → unocg-0.0.4}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "unocg"
- version = "0.0.2"
+ version = "0.0.4"
  authors = [
  { name="Julius Herb", email="herb@mib.uni-stuttgart.de" },
  { name="Felix Fritzen", email="fritzen@mib.uni-stuttgart.de" },
@@ -23,19 +23,19 @@ dependencies = [
  "torch==2.10.0",
  "tqdm~=4.67.1",
  "torchvision~=0.25.0",
- "pyvista[jupyter]~=0.46.5"
+ "pyvista[jupyter]~=0.46.5",
+ "jupyterlab~=4.5.3",
  ]

  [project.optional-dependencies]
  dev = [
  "jupytext~=1.16.4",
- "jupyterlab~=4.5.3",
  "pre_commit~=3.8.0",
  "sphinx~=7.4.7",
  "sphinx-rtd-theme~=2.0.0",
- 'myst-parser~=4.0.0',
- 'nbsphinx~=0.9.5',
- 'sphinx-gallery~=0.17.1',
+ "myst-parser~=4.0.0",
+ "nbsphinx~=0.9.5",
+ "sphinx-gallery~=0.17.1",
  ]
  all = [
  "unocg[dev]"
unocg-0.0.4/unocg/training/__init__.py (new file, empty)
unocg-0.0.4/unocg/training/losses/__init__.py (new file)
@@ -0,0 +1 @@
+ from .base import Loss, WeightedLoss
unocg-0.0.4/unocg/training/losses/base.py (new file)
@@ -0,0 +1,102 @@
+ """
+ Definition of loss functions that allow for physics-informed training of machine learning models
+ """
+ from abc import ABC
+ from typing import Union, Iterable
+
+ # third-party packages
+ import torch
+
+
+ class Loss(ABC):
+     """
+     Abstract loss function
+     """
+
+     def __init__(self, n_dim: int = 2, reduction: str = "sum"):
+         """
+         :param n_dim: dimension of the problem
+         :type n_dim: int, optional
+         :param reduction: ('mean'|'sum'|'none'), defaults to 'sum'
+         :type reduction: str, optional
+         """
+         super().__init__()
+         self.n_dim = n_dim
+         self.ch_dim = -(1 + self.n_dim)
+         self.reduction = reduction
+
+     def reduce(self, loss):
+         """
+         Perform a reduction step over all datasets to transform a loss function into a cost function.
+
+         A loss function is evaluated element-wise for a dataset.
+         However, a cost function should return a single value for the dataset.
+         Typically, `mean` reduction is used.
+
+         :param loss: Tensor that contains the element-wise loss for a dataset
+         :type loss: :class:`torch.Tensor`
+         :return: Reduced loss
+         :rtype: :class:`torch.Tensor`
+         """
+         if self.reduction == "mean":
+             return torch.nanmean(loss)
+         elif self.reduction == "sum":
+             return torch.nansum(loss)
+         else:
+             return loss
+
+     def unsqueeze(self, output, target):
+         """
+         Ensure that the tensors :code:`output` and :code:`target` have a shape of the form :code:`(N, features)`.
+
+         When a loss function is called with a single data point, the tensor shape is :code:`(features)` and hence does not fit.
+         This method expands the dimensions if needed.
+
+         :param output: Model output
+         :type output: :class:`torch.Tensor`
+         :param target: Target data
+         :type target: :class:`torch.Tensor`
+         :return: Tuple (output, target)
+         :rtype: tuple
+         """
+         while output.ndim < self.n_dim + 2:
+             output = torch.unsqueeze(output, 0)
+         while target.ndim < output.ndim:
+             target = torch.unsqueeze(target, 0)
+         while output.ndim < target.ndim:
+             output = torch.unsqueeze(output, 0)
+         assert output.ndim >= self.n_dim + 2 and output.ndim == target.ndim
+         return output, target
+         # return output.flatten(end_dim=-(self.n_dim + 2)), target.flatten(end_dim=-(self.n_dim + 2))
+
+     @property
+     def dims(self):
+         return self.n_dim * (slice(None),)
+
+     @property
+     def dims_list(self):
+         return tuple(range(-1, -(1 + self.n_dim), -1))
+
+     @property
+     def expand_dims(self):
+         return self.n_dim * (None,)
+
+
+ class WeightedLoss(Loss):
+     """
+     Weighted loss function that represents a linear combination of several loss functions
+     """
+
+     def __init__(self, losses: Iterable[Loss], weights: Iterable[Union[float, int]], reduction="mean"):
+         super().__init__(reduction=reduction)
+         self.losses = losses
+         self.weights = weights
+
+     def __call__(self, output, target):
+         total_loss = 0.0
+         for loss, weight in zip(self.losses, self.weights):
+             total_loss += weight * loss(output, target)
+         return total_loss
+
+     def __str__(self):
+         return "Weighted(...)"
unocg-0.0.4/unocg/training/losses/mechanical.py (new file)
@@ -0,0 +1,246 @@
+ import torch
+ from .base import Loss
+
+
+ class DispLoss(Loss):
+     """
+     Displacement loss function
+     """
+
+     def __init__(self, n_dim=2, reduction="sum"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+
+     def abs(self, output, target):
+         """
+         Compute absolute error for the displacement field
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+         loss = torch.sqrt(torch.nn.MSELoss(reduction="none")(target_disp, output_disp).nanmean(self.dims_list))
+         loss = self.reduce(loss).squeeze(-1)
+         return loss
+
+     def rel(self, output, target):
+         """
+         Compute relative error for the displacement field
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+         loss = torch.linalg.norm(target_disp - output_disp, dim=-(1 + self.n_dim)).nanmean(self.dims_list) \
+             / torch.linalg.norm(target_disp, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+         loss = self.reduce(loss).squeeze(-1)
+         return loss
+
+     def __call__(self, x, y):
+         return self.rel(x, y)
+
+     def __str__(self):
+         return f"Disp({self.reduction})"
+
+
+ class StrainLoss(Loss):
+     """
+     Strain loss function
+     """
+
+     def __init__(self, grad_module, n_dim=2, reduction="sum"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+         self.grad_module = grad_module
+
+     def abs(self, output, target):
+         """
+         Compute absolute error for the displacement field
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+         loss = torch.linalg.norm(target_disp - output_disp, dim=self.ch_dim)
+         loss = self.reduce(loss)
+         return loss
+
+     def rel(self, output, target):
+         """
+         Compute error for the strain (displacement gradient) field
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+         batch_dims = output_disp.shape[:self.ch_dim]
+         output_disp = torch.flatten(output_disp, start_dim=0, end_dim=self.ch_dim - 1)
+         target_disp = torch.flatten(target_disp, start_dim=0, end_dim=self.ch_dim - 1)
+         output_grad = self.grad_module(output_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         target_grad = self.grad_module(target_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         output_disp = torch.unflatten(output_disp, dim=0, sizes=batch_dims)
+         target_disp = torch.unflatten(target_disp, dim=0, sizes=batch_dims)
+         loss = torch.linalg.norm(target_grad - output_grad, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+         loss = self.reduce(loss)
+         return loss
+
+     def __call__(self, x, y):
+         return self.rel(x, y)
+
+     def __str__(self):
+         return f"Strain({self.reduction})"
+
+
+ class StressLoss(Loss):
+     """
+     Stress loss function
+     """
+
+     def __init__(self, n_dim=2, reduction="sum"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+
+     def abs(self, output, target):
+         """
+         Compute absolute error in the stress norm
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+         loss = torch.linalg.norm(target_stress - output_stress, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+         loss = self.reduce(loss)
+         return loss
+
+     def rel(self, output, target):
+         """
+         Compute relative error in the stress norm
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+         loss = torch.linalg.norm(target_stress - output_stress, dim=-(1 + self.n_dim)).nanmean(self.dims_list) \
+             / torch.linalg.norm(target_stress, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+         loss = self.reduce(loss)
+         return loss
+
+     def __call__(self, output: torch.Tensor, target: torch.Tensor):
+         return self.rel(output, target)
+
+     def __str__(self):
+         return f"Stress({self.reduction})"
+
+
+ class MechanicalEnergyLoss(Loss):
+     """
+     Mechanical energy loss function
+     """
+
+     def __init__(self, grad_module, n_dim=2, reduction="sum"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+         self.grad_module = grad_module
+
+     def abs(self, output, target):
+         """
+         Compute absolute error in the energy norm
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+         output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+         batch_dims = output_disp.shape[:self.ch_dim]
+         output_disp = torch.flatten(output_disp, start_dim=0, end_dim=self.ch_dim - 1)
+         target_disp = torch.flatten(target_disp, start_dim=0, end_dim=self.ch_dim - 1)
+         output_grad = self.grad_module(output_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         target_grad = self.grad_module(target_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         output_disp = torch.unflatten(output_disp, dim=0, sizes=batch_dims)
+         target_disp = torch.unflatten(target_disp, dim=0, sizes=batch_dims)
+         loss = torch.sqrt(
+             torch.linalg.norm((output_grad - target_grad) * (output_stress - target_stress), dim=self.ch_dim).nanmean(self.dims_list)
+         )
+         loss = self.reduce(loss)
+         return loss
+
+     def rel(self, output, target):
+         """
+         Compute relative error in the energy norm
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_disp, target_disp = output[..., :self.n_dim, *self.dims], target[..., :self.n_dim, *self.dims]
+         output_stress, target_stress = output[..., self.n_dim:, *self.dims], target[..., self.n_dim:, *self.dims]
+         batch_dims = output_disp.shape[:self.ch_dim]
+         output_disp = torch.flatten(output_disp, start_dim=0, end_dim=self.ch_dim - 1)
+         target_disp = torch.flatten(target_disp, start_dim=0, end_dim=self.ch_dim - 1)
+         output_grad = self.grad_module(output_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         target_grad = self.grad_module(target_disp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         output_disp = torch.unflatten(output_disp, dim=0, sizes=batch_dims)
+         target_disp = torch.unflatten(target_disp, dim=0, sizes=batch_dims)
+         loss = torch.sqrt(
+             torch.linalg.norm((output_grad - target_grad) * (output_stress - target_stress), dim=self.ch_dim).nanmean(self.dims_list)
+             / torch.linalg.norm(target_grad * target_stress, dim=self.ch_dim).nanmean(self.dims_list)
+         )
+         loss = self.reduce(loss)
+         return loss
+
+     def __call__(self, output_disp: torch.Tensor, target_disp: torch.Tensor, param_field=None):
+         return self.abs(output_disp, target_disp)
+
+     def __str__(self):
+         return f"MechEnergy({self.reduction})"
+
+
+ class LinearElasticResidualLoss(Loss):
+     """
+     Linear elastic residual loss function
+     """
+
+     def __init__(self, div_module, n_dim=2, reduction="sum", residual_mode="mean"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+         self.div_module = div_module
+         self.residual_mode = residual_mode
+
+     def abs(self, output, target=None):
+         """
+         Compute the residual norm of the discretized balance equation
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+         batch_dims = target_flux.shape[:self.ch_dim]
+         output_flux = torch.flatten(output_flux, start_dim=0, end_dim=self.ch_dim - 1).unsqueeze(self.ch_dim - 1)
+         output_residual = self.div_module(output_flux)
+         output_residual = torch.unflatten(output_residual, dim=0, sizes=batch_dims)
+         if self.residual_mode == "sum":
+             loss = torch.abs(output_residual).nansum(self.dims_list)
+         elif self.residual_mode == "mean":
+             loss = torch.abs(output_residual).nanmean(self.dims_list)
+         else:
+             raise ValueError("Unknown residual_mode")
+         loss = self.reduce(loss).squeeze(-1)
+         return loss
+
+     def __call__(self, output: torch.Tensor, target):
+         return self.abs(output, target)
+
+     def __str__(self):
+         return f"LinearElasticResidual({self.reduction})"
unocg-0.0.4/unocg/training/losses/thermal.py (new file)
@@ -0,0 +1,247 @@
+ import torch
+ from .base import Loss
+
+
+ class TempLoss(Loss):
+     """
+     Temperature loss function
+     """
+
+     def __init__(self, n_dim=2, reduction="sum"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+
+     def abs(self, output, target):
+         """
+         Compute absolute error for the temperature field
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+         loss = torch.sqrt(torch.nn.MSELoss(reduction="none")(target_temp, output_temp).nanmean(self.dims_list))
+         loss = self.reduce(loss).squeeze(-1)
+         return loss
+
+     def rel(self, output, target):
+         """
+         Compute relative error for the temperature field
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+         loss = torch.sqrt(torch.nn.MSELoss(reduction="none")(target_temp, output_temp).nanmean(self.dims_list)
+                           / torch.nn.MSELoss(reduction="none")(target_temp, torch.zeros_like(target_temp)).nanmean(self.dims_list))
+         loss = self.reduce(loss).squeeze(-1)
+         return loss
+
+     def __call__(self, x, y):
+         return self.rel(x, y)
+
+     def __str__(self):
+         return f"Temp({self.reduction})"
+
+
+ class GradLoss(Loss):
+     """
+     Temperature gradient loss function
+     """
+
+     def __init__(self, grad_module, n_dim=2, reduction="sum"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+         self.grad_module = grad_module
+
+     def abs(self, output, target):
+         """
+         Compute absolute error for the temperature field
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+         loss = torch.linalg.norm(target_temp - output_temp, dim=-3)
+         loss = self.reduce(loss)
+         return loss
+
+     def rel(self, output, target):
+         """
+         Compute error for the temperature gradient field
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+         # output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+         batch_dims = output_temp.shape[:self.ch_dim]
+         output_temp = torch.flatten(output_temp, start_dim=0, end_dim=self.ch_dim - 1)
+         target_temp = torch.flatten(target_temp, start_dim=0, end_dim=self.ch_dim - 1)
+         output_grad = self.grad_module(output_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         target_grad = self.grad_module(target_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         output_temp = torch.unflatten(output_temp, dim=0, sizes=batch_dims)
+         target_temp = torch.unflatten(target_temp, dim=0, sizes=batch_dims)
+         loss = torch.linalg.norm(target_grad - output_grad, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+         loss = self.reduce(loss)
+         return loss
+
+     def __call__(self, x, y):
+         return self.rel(x, y)
+
+     def __str__(self):
+         return f"Grad({self.reduction})"
+
+
+ class FluxLoss(Loss):
+     """
+     Flux loss function
+     """
+
+     def __init__(self, n_dim=2, reduction="sum"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+
+     def abs(self, output, target):
+         """
+         Compute absolute error in the flux norm
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+         loss = torch.linalg.norm(target_flux - output_flux, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+         loss = self.reduce(loss)
+         return loss
+
+     def rel(self, output, target):
+         """
+         Compute relative error in the flux norm
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+         loss = torch.linalg.norm(target_flux - output_flux, dim=-(1 + self.n_dim)).nanmean(self.dims_list) \
+             / torch.linalg.norm(target_flux, dim=-(1 + self.n_dim)).nanmean(self.dims_list)
+         loss = self.reduce(loss)
+         return loss
+
+     def __call__(self, output: torch.Tensor, target: torch.Tensor):
+         return self.rel(output, target)
+
+     def __str__(self):
+         return f"Flux({self.reduction})"
+
+
+ class ThermalEnergyLoss(Loss):
+     """
+     Thermal energy loss function
+     """
+
+     def __init__(self, grad_module, n_dim=2, reduction="sum"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+         self.grad_module = grad_module
+
+     def abs(self, output, target):
+         """
+         Compute absolute error in the energy norm
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+         output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+         batch_dims = output_temp.shape[:self.ch_dim]
+         output_temp = torch.flatten(output_temp, start_dim=0, end_dim=self.ch_dim - 1)
+         target_temp = torch.flatten(target_temp, start_dim=0, end_dim=self.ch_dim - 1)
+         output_grad = self.grad_module(output_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         target_grad = self.grad_module(target_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         output_temp = torch.unflatten(output_temp, dim=0, sizes=batch_dims)
+         target_temp = torch.unflatten(target_temp, dim=0, sizes=batch_dims)
+         loss = torch.sqrt(
+             torch.linalg.norm((output_grad - target_grad) * (output_flux - target_flux), dim=self.ch_dim).nanmean(self.dims_list)
+         )
+         loss = self.reduce(loss)
+         return loss
+
+     def rel(self, output, target):
+         """
+         Compute relative error in the energy norm
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_temp, target_temp = output[..., :1, *self.dims], target[..., :1, *self.dims]
+         output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+         batch_dims = output_temp.shape[:self.ch_dim]
+         output_temp = torch.flatten(output_temp, start_dim=0, end_dim=self.ch_dim - 1)
+         target_temp = torch.flatten(target_temp, start_dim=0, end_dim=self.ch_dim - 1)
+         output_grad = self.grad_module(output_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         target_grad = self.grad_module(target_temp).nanmean(-(2 + self.n_dim))  # average over gauss points
+         output_temp = torch.unflatten(output_temp, dim=0, sizes=batch_dims)
+         target_temp = torch.unflatten(target_temp, dim=0, sizes=batch_dims)
+         loss = torch.sqrt(
+             torch.linalg.norm((output_grad - target_grad) * (output_flux - target_flux), dim=self.ch_dim).nanmean(self.dims_list)
+             / torch.linalg.norm(target_grad * target_flux, dim=self.ch_dim).nanmean(self.dims_list)
+         )
+         loss = self.reduce(loss)
+         return loss
+
+     def __call__(self, output: torch.Tensor, target: torch.Tensor, param_field=None):
+         return self.abs(output, target)
+
+     def __str__(self):
+         return f"ThermEnergy({self.reduction})"
+
+
+ class HeatCondResidualLoss(Loss):
+     """
+     Heat conduction residual loss function
+     """
+
+     def __init__(self, div_module, n_dim=2, reduction="sum", residual_mode="mean"):
+         super().__init__(n_dim=n_dim, reduction=reduction)
+         self.div_module = div_module
+         self.residual_mode = residual_mode
+
+     def abs(self, output, target=None):
+         """
+         Compute the residual norm of the discretized heat conduction equation
+
+         :param output: Model output
+         :param target: Target data
+         :return: Reduced loss
+         """
+         output, target = self.unsqueeze(output, target)
+         output_flux, target_flux = output[..., 1:, *self.dims], target[..., 1:, *self.dims]
+         batch_dims = target_flux.shape[:self.ch_dim]
+         output_flux = torch.flatten(output_flux, start_dim=0, end_dim=self.ch_dim - 1).unsqueeze(self.ch_dim - 1)
+         output_residual = self.div_module(output_flux)
+         output_residual = torch.unflatten(output_residual, dim=0, sizes=batch_dims)
+         if self.residual_mode == "sum":
+             loss = torch.abs(output_residual).nansum(self.dims_list)
+         elif self.residual_mode == "mean":
+             loss = torch.abs(output_residual).nanmean(self.dims_list)
+         else:
+             raise ValueError("Unknown residual_mode")
+         loss = self.reduce(loss).squeeze(-1)
+         return loss
+
+     def __call__(self, output: torch.Tensor, target):
+         return self.abs(output, target)
+
+     def __str__(self):
+         return f"HeatCondResidual({self.reduction})"
unocg-0.0.4/unocg/utils/__init__.py (new file, empty)
unocg-0.0.4/unocg/utils/data.py (new file)
@@ -0,0 +1,95 @@
+ """
+ Data loading for microstructure datasets
+ """
+
+ from typing import Optional, Union
+
+ import torch
+ import h5py
+
+
+ class MicrostructureDataset(torch.utils.data.Dataset):
+     """
+     Represents a microstructure dataset stored in an HDF5 file
+     """
+     def __init__(
+         self,
+         file_name: str,
+         group_name: str,
+         lazy_loading: bool = True,
+         device: Union[str, torch.device] = "cpu",
+         dtype: Optional[torch.dtype] = None,
+     ):
+         """
+         Create a `PyTorch` dataset from a given HDF5 file group.
+
+         :param file_name: path to the HDF5 file
+         :type file_name: str
+         :param group_name: path to the group in the HDF5 file
+         :type group_name: str
+         :param lazy_loading: load samples on first access instead of upfront
+         :type lazy_loading: bool
+         :param device: device the tensors are placed on
+         :param dtype: tensor dtype, defaults to :class:`torch.float32`
+         """
+         super().__init__()
+         self.file_name = file_name
+         self.group_name = group_name
+         self.lazy_loading = lazy_loading
+         self.keys = []
+         self.loaded_keys = []
+         self.images = {}
+         if dtype is None:
+             self.dtype = torch.float32
+         else:
+             self.dtype = dtype
+         self.device = device
+         self.tensor_args = {"dtype": self.dtype, "device": self.device}
+
+         with h5py.File(self.file_name, "r") as file:
+             for dset_name in file[self.group_name].keys():
+                 self.keys.append(dset_name)
+         if not self.lazy_loading:
+             for dset_name in self.keys:
+                 self.load_dset(dset_name)
+
+     def __len__(self) -> int:
+         """
+         Get the length of the dataset, i.e. how many data points it contains.
+
+         :return: Length of the dataset
+         :rtype: int
+         """
+         return len(self.keys)
+
+     def __getitem__(self, index: int) -> torch.Tensor:
+         """
+         Fetch the data point with the given index from the dataset
+
+         :param index: Index of the data point
+         :type index: int
+         :return: microstructure image
+         :rtype: :class:`torch.Tensor`
+         """
+         if index >= len(self.keys):
+             raise IndexError("Index out of range")
+         dset_name = self.keys[index]
+         self.load_dset(dset_name, force_loading=False)
+         return self.images[dset_name].clone()
+
+     def load_dset(self, dset_name: str, force_loading: bool = True):
+         """
+         Load a dataset from the HDF5 file
+
+         :param dset_name: name of the sub-group that holds the sample
+         :param force_loading: reload even if the sample is already cached
+         """
+         if (not force_loading) and (dset_name in self.loaded_keys):
+             return
+
+         with h5py.File(self.file_name, "r") as file:
+             image = torch.tensor(file[self.group_name][dset_name]["image"][...], **self.tensor_args)
+
+         self.images[dset_name] = image
+         self.loaded_keys.append(dset_name)
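
A short round-trip sketch of the expected HDF5 layout (one sub-group per sample, each holding an "image" dataset); the file and group names below are made up for illustration:

# Hypothetical example: write a tiny HDF5 file and read it back via the dataset class.
import h5py
import numpy as np
import torch
from unocg.utils.data import MicrostructureDataset

with h5py.File("ms_demo.h5", "w") as f:
    group = f.create_group("train")
    for i in range(4):
        sample = group.create_group(f"sample_{i}")
        sample.create_dataset("image", data=np.random.randint(0, 2, size=(64, 64)))

dataset = MicrostructureDataset("ms_demo.h5", "train", lazy_loading=True)
loader = torch.utils.data.DataLoader(dataset, batch_size=2)
for batch in loader:
    print(batch.shape, batch.dtype)  # torch.Size([2, 64, 64]) torch.float32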
unocg-0.0.4/unocg/utils/evaluation.py (new file)
@@ -0,0 +1,48 @@
+ import timeit
+
+ import torch
+
+
+ def benchmark(model, inputs, device, n_runs=2, n_warmup=1, verbose=True):
+     def run_model():
+         with torch.inference_mode():
+             model(*inputs)
+         if torch.cuda.is_available():  # synchronize only when CUDA is present
+             torch.cuda.synchronize(device)
+
+     for _ in range(n_warmup):
+         run_model()
+     model_time = timeit.timeit(run_model, number=n_runs) / n_runs
+     if verbose:
+         print(f"Runtime per execution: {model_time * 1000.:.4f}ms")
+     return model_time
+
+
+ def benchmark_cg(cg_module, param_fields, loadings, device, n_runs=2, n_warmup=1, verbose=True):
+     with torch.inference_mode():
+         guess = cg_module.zero_guess(param_fields, loadings)
+
+     inputs_module = (guess, param_fields, loadings)
+
+     print("Overall solver:")
+     u = cg_module(*inputs_module)
+     time_overall = benchmark(cg_module, inputs_module, device, n_runs=n_runs)
+
+     iter_layer = cg_module.iteration_layers[0]
+     print("Solver iteration:")
+     batch_shape = (*param_fields.shape[:(-cg_module.n_dim - 1)], loadings.shape[0])
+     iv_fields, iv_scalars = iter_layer.init_internal_variables(batch_shape=batch_shape, init_residual=u)
+     time_iter = benchmark(iter_layer, (u, u, param_fields, loadings, iv_fields, iv_scalars), device, n_runs=n_runs * 10)
+
+     print("Preconditioner application:")
+     time_prec = benchmark(cg_module.prec_model, (u,), device, n_runs=n_runs * 100, n_warmup=n_warmup)
+
+     print("Residual computation:")
+     time_matvec = benchmark(cg_module.matvec_model, (u, param_fields), device, n_runs=n_runs * 10, n_warmup=n_warmup)
+
+     return time_overall, time_iter, time_prec, time_matvec
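
benchmark() works for any callable and matching input tuple; a CPU sketch (the model and shapes are placeholders, not package defaults, and CPU use assumes the CUDA-synchronize guard shown above):

# Hypothetical example: time a small convolution with benchmark().
import torch
from unocg.utils.evaluation import benchmark

model = torch.nn.Conv2d(1, 8, kernel_size=3, padding=1)
inputs = (torch.rand(4, 1, 64, 64),)
mean_runtime = benchmark(model, inputs, device="cpu", n_runs=5, n_warmup=2)

benchmark_cg() additionally splits a CG solve into per-iteration, preconditioner-application, and residual-computation timings, but it requires a configured cg_module plus parameter fields and loadings from the solver setup.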
unocg-0.0.4/unocg/utils/plotting.py (new file)
@@ -0,0 +1,339 @@
+ """
+ Plotting utilities
+ """
+ import shutil
+ from typing import Optional, List
+
+ import matplotlib
+ import numpy as np
+ import torch
+ from matplotlib import pyplot as plt
+ from matplotlib.tri import Triangulation
+
+ try:
+     import pyvista as pv
+ except ImportError:
+     pv = None
+
+ plt.rcParams["text.usetex"] = True if shutil.which("latex") else False
+ matplotlib.rcParams["text.latex.preamble"] = r"\usepackage{amsmath}"
+ plot_width = 6.3
+
+ colors = [
+     "g",
+     "#00BEFF",
+     "#004191",
+     "r",
+     "k",
+ ]
+
+
+ def plot_ms(image: torch.Tensor, file: Optional[str] = None, show_axis: bool = False, show_cbar: bool = False):
+     """
+     Plot a microstructure image
+
+     :param image: two-phase indicator image
+     :param file: if given, save the figure to this path
+     :param show_axis: show axis ticks and labels
+     :param show_cbar: show a colorbar for the phase indicator
+     """
+     image = image.detach().cpu()
+     fig, ax = plt.subplots(1, 1, figsize=[6.3 / 2, 2.65 if show_cbar else 6.3 / 2], dpi=300)
+     ms_cmap = plt.get_cmap("viridis", 2)
+     im = ax.imshow(image, origin="lower", interpolation="none", extent=(-0.5, 0.5, -0.5, 0.5), cmap=ms_cmap)
+
+     if show_cbar:
+         cb = fig.colorbar(im, ax=ax, ticks=[0, 1])
+         cb.ax.set_title(r"$\chi_1$")
+         cb.ax.set_yticklabels(["$0$", "$1$"])
+
+     if show_axis:
+         ax.axis("on")
+         ax.set_xlim(-0.5, 0.5)
+         ax.set_ylim(-0.5, 0.5)
+         ax.set_xticks([-0.5, 0.0, 0.5])
+         ax.set_yticks([-0.5, 0.0, 0.5])
+         ax.set_xlabel(r"$\frac{x_1}{l_1}$")
+         ax.set_ylabel(r"$\frac{x_2}{l_2}$")
+     else:
+         ax.axis("off")
+
+     plt.tight_layout()
+     if file is not None:
+         plt.savefig(file, dpi=300)
+     plt.show()
+
+
+ def plot_prec_action(ax, field_ref, init_res, precs, labels, ch_idx=0, plot_res=False, centered=True, ch_label=None):
+     """
+     Plot the action of several preconditioners on an initial residual next to the reference field
+     """
+     idx = (0, 0, ch_idx)
+     ax_i = 0
+     if centered:
+         s_abs_max = torch.max(field_ref[idx].cpu().abs())
+         s_max, s_min = s_abs_max, -s_abs_max
+     else:
+         s_max, s_min = torch.max(field_ref[idx].cpu()), torch.min(field_ref[idx].cpu())
+
+     prec_actions = []
+     with torch.inference_mode():
+         for prec in precs:
+             prec_action = prec.apply_field(init_res)
+             prec_action = prec_action / prec_action[idx].max() * field_ref[idx].abs().max()
+             prec_actions.append(prec_action)
+             if centered:
+                 s_abs_max = torch.maximum(s_abs_max, torch.max(prec_action[idx].cpu().abs()))
+                 s_max, s_min = s_abs_max, -s_abs_max
+             else:
+                 s_max = torch.maximum(s_max, torch.max(prec_action[idx].cpu()))
+                 s_min = torch.minimum(s_min, torch.min(prec_action[idx].cpu()))
+
+     if plot_res:
+         im = ax[0, ax_i].imshow(init_res[idx].cpu().detach(), origin="lower", cmap="seismic")
+         if ch_label is None:
+             ax[0, ax_i].set_title(r"$\boldsymbol{r}^{(0)}$")
+         else:
+             ax[0, ax_i].set_title(rf"$\left( \boldsymbol{{r}}^{{(0)}} \right)_{{{ch_label}}}$")
+         plt.colorbar(im, ax=ax[0, ax_i])
+         ax_i += 1
+
+     im = ax[0, ax_i].imshow(field_ref[idx].cpu().detach(), origin="lower", cmap="jet", vmax=s_max, vmin=s_min)
+     if ch_label is None:
+         ax[0, ax_i].set_title(r"$\boldsymbol{u}_{\underline{\mu}}$")
+     else:
+         ax[0, ax_i].set_title(rf"$\left( \boldsymbol{{u}}_{{\underline{{a}}}} \right)_{{{ch_label}}}$")
+     plt.colorbar(im, ax=ax[0, ax_i])
+     ax_i += 1
+
+     for prec_action, label in zip(prec_actions, labels):
+         im = ax[0, ax_i].imshow(prec_action[idx].cpu().detach(), origin="lower", cmap="jet", vmax=s_max, vmin=s_min)
+         if ch_label is None:
+             ax[0, ax_i].set_title(rf"$\boldsymbol{{s}}_\text{{{label}}}$")
+         else:
+             ax[0, ax_i].set_title(rf"$\left( \boldsymbol{{s}}_\text{{{label}}} \right)_{{{ch_label}}}$")
+         plt.colorbar(im, ax=ax[0, ax_i])
+         ax_i += 1
+
+     for ax_handle in ax.ravel():
+         ax_handle.set_xticks([])
+         ax_handle.set_yticks([])
+
+
+ def plot_convergence(ax, results, labels, colors, metrics, metric_labels, load_names=None, rates=None, show_load_labels=False,
+                      xmin=0, xmax=None, bounds=False, ymin=1e-10, ymax=1e0, zoom=False, zoom_it=10, zoom_tol=1e-3):
+     """
+     Plot normalized convergence histories for several solvers, metrics, and load cases.
+     """
+     if load_names is None:
+         load_names = ["x"]
+     if rates is None:
+         rates = (None,) * len(results)
+     colors = colors[:len(results)]
+     while len(labels) < len(results):
+         labels.append("CG")
+
+     for load_i, load_name in enumerate(load_names):
+         if show_load_labels:
+             ax[0, load_i].set_title(f"loading {load_name}")
+
+         for metric_i, (metric, metric_label) in enumerate(zip(metrics, metric_labels)):
+             ax[metric_i, load_i].set_xlabel(r"iterations $[-]$")
+             ax[metric_i, load_i].set_ylabel(metric_label)
+             ax[metric_i, load_i].set_ylim(bottom=ymin, top=ymax)
+             if zoom:
+                 axins = ax[metric_i, load_i].inset_axes([0.2, 0.15, 0.1, 0.45],
+                                                         xlim=(-1, zoom_it), ylim=(zoom_tol, 1e0), xticklabels=[], yticklabels=[])
+                 axins.grid()
+                 axins.set_xticks([0, zoom_it], ["$0$", f"${zoom_it}$"])
+
+             for result, label, color, rate in zip(results, labels, colors, rates):
+                 if result is None:
+                     continue
+                 iters = torch.arange(result["err_history"].shape[0])
+                 metric_losses = result["losses"][metric]
+                 if metric_losses.ndim == 1:
+                     metric_losses = metric_losses.unsqueeze(-1)
+                 ax[metric_i, load_i].semilogy(iters, metric_losses[:, load_i].cpu() / metric_losses[0, load_i].cpu(), '-', c=color, label=label)
+
+                 if zoom:
+                     axins.semilogy(iters, metric_losses[:, load_i].cpu() / metric_losses[0, load_i].cpu(), '-', c=color)
+
+             if zoom:
+                 ax[metric_i, load_i].indicate_inset_zoom(axins, edgecolor="black")
+
+     for ax_handle in ax.ravel():
+         ax_handle.grid()
+         ax_handle.set_xlim(left=xmin, right=xmax)
+
+
+ def plot_convergence_histogram(ax, results, labels, colors, rtol=1e-6, bins=None, xmin=0, xmax=None, log_scale=False, legend=True):
+     """
+     Plot a histogram of the iteration counts needed to push the relative residual below ``rtol``.
+     """
+     for result, label, color in zip(results, labels, colors):
+         if result is None:
+             continue
+         metric = get_rel_residual(result)
+         iters = (metric > rtol).sum(dim=0)
+         ax.hist(iters, edgecolor=color, facecolor=color, bins=bins, label=label)
+     if log_scale:
+         ax.set_xlim(max(xmin, 1), xmax)
+         ax.set_xscale("log")
+     else:
+         ax.set_xlim(xmin, xmax)
+     ax.set_axisbelow(True)
+     ax.grid()
+     if legend:
+         ax.legend()
+
+
+ def get_rel_residual(result):
+     """
+     Return the residual history normalized by the initial residual.
+     """
+     return result['err_history'].cpu().flatten(start_dim=-2) / result['err_history'].cpu().flatten(start_dim=-2)[0]
+
+
+ def plot_deformed_rve_2d(
+     problem,
+     disp,
+     field,
+     loading=None,
+     fluctuation_scaling: float = 1.0,
+     deformation_scaling: float = 1.0,
+     plot_loading: bool = False,
+     plot_boundary: bool = False,
+     shading: str = "gouraud",
+     file: Optional[str] = None,
+     vmin: Optional[List[float]] = None,
+     vmax: Optional[List[float]] = None,
+     figsize: Optional[List[float]] = None
+ ):
+     """
+     Plot the deformed RVE in 2D for each loading case.
+
+     :param problem: problem instance providing the grid and deformation helpers
+     :param disp: displacement fluctuation field
+     :param field: scalar field (e.g. stress norm) used to color the deformed mesh
+     :param loading: macroscopic loading cases, defaults to identity loadings
+     :param fluctuation_scaling: scaling of the displacement fluctuations
+     :param deformation_scaling: scaling of the overall deformation
+     :param plot_loading: overlay the boundary of the affinely deformed cell
+     :param plot_boundary: overlay the boundary of the actually deformed cell
+     :param shading: shading passed to :func:`tripcolor`
+     :param file: if given, save the figure to this path
+     :param vmin: per-loading lower color limits
+     :param vmax: per-loading upper color limits
+     :param figsize: figure size in inches
+     """
+     def apply_mask(triang, alpha=0.4):
+         # Mask off triangles with a side length greater than the threshold alpha
+         triangles = triang.triangles
+         x = triang.x
+         y = triang.y
+         xtri = x[triangles] - np.roll(x[triangles], 1, axis=1)
+         ytri = y[triangles] - np.roll(y[triangles], 1, axis=1)
+         maxi = np.max(np.sqrt(xtri**2 + ytri**2), axis=1)
+         triang.set_mask(maxi > alpha)
+
+     if loading is None:
+         loading = torch.eye(3, dtype=disp.dtype, device=disp.device)
+
+     n_loadings = loading.shape[0]
+
+     deformed_coords = problem.get_deformed_coordinates(
+         disp, loading, fluctuation_scaling=fluctuation_scaling, deformation_scaling=deformation_scaling
+     )
+     loading_coords = problem.get_deformed_coordinates(
+         disp, loading, fluctuation_scaling=0.0, deformation_scaling=deformation_scaling
+     )
+     # boundary curves, needed for the plot_loading / plot_boundary overlays
+     boundary_idx = problem.get_boundary_idx()
+     deformed_boundary = deformed_coords[..., boundary_idx[0], boundary_idx[1]]
+     loading_boundary = loading_coords[..., boundary_idx[0], boundary_idx[1]]
+
+     loadings = [
+         rf"$\bar{{\boldsymbol{{\varepsilon}}}}=\bar{{\boldsymbol{{\varepsilon}}}}^{{({i + 1})}}$"
+         for i in range(n_loadings)
+     ]
+
+     if figsize is None:
+         figsize = [6.3, 2.0]
+
+     if problem.n_dim == 3:
+         N_cut = problem.n_grid[-1] // 2
+         field = field[..., N_cut]
+         deformed_coords = deformed_coords[..., N_cut]
+
+     fig, ax = plt.subplots(1, n_loadings, figsize=figsize, dpi=300, squeeze=False)
+     for load_idx, load_name in enumerate(loadings):
+         ax[0, load_idx].axis("off")
+         ax[0, load_idx].set_aspect("equal")
+         ax[0, load_idx].set_title(load_name)
+         tri = Triangulation(deformed_coords[load_idx, 0].ravel(), deformed_coords[load_idx, 1].ravel())
+         apply_mask(tri, alpha=0.02)
+
+         if vmin is None:
+             vmin_idx = field[load_idx].min().item()
+         else:
+             vmin_idx = vmin[load_idx]
+         if vmax is None:
+             vmax_idx = 0.5 * field[load_idx].max().item()
+         else:
+             vmax_idx = vmax[load_idx]
+
+         tpc = ax[0, load_idx].tripcolor(
+             tri, field[load_idx].ravel(), cmap="jet", shading=shading, rasterized=True, vmin=vmin_idx, vmax=vmax_idx
+         )
+
+         if plot_loading:
+             ax[0, load_idx].plot(
+                 loading_boundary[load_idx, 0].ravel(),
+                 loading_boundary[load_idx, 1].ravel(),
+                 "k--",
+                 lw=1,
+             )
+         if plot_boundary:
+             ax[0, load_idx].plot(
+                 deformed_boundary[load_idx, 0].ravel(),
+                 deformed_boundary[load_idx, 1].ravel(),
+                 "r--",
+                 lw=1,
+             )
+     clb = fig.colorbar(tpc)
+     clb.ax.set_title(r"$||\boldsymbol{\sigma}|| \,[\mathrm{GPa}]$")
+     plt.tight_layout()
+     if file is not None:
+         plt.savefig(file, dpi=300)
+     plt.show()
+
+
+ def plot_deformed_rve_3d(
+     problem,
+     disp,
+     field,
+     loadings,
+     fluctuation_scaling: float = 1.0,
+     deformation_scaling: float = 1.0,
+     file: Optional[str] = None,
+     vmin: Optional[List[float]] = None,
+     vmax: Optional[List[float]] = None,
+     figsize: Optional[List[float]] = None
+ ):
+     if pv is None:
+         raise ImportError("pyvista is required for plot_deformed_rve_3d")
+     disp = torch.nn.functional.pad(disp, pad=[0, 1, 0, 1, 0, 1], mode="circular")
+     coords = problem.get_node_coords().to(dtype=disp.dtype, device=disp.device)
+     deformations = problem.get_deformations(disp.transpose(-1, -3), loadings, fluctuation_scaling=fluctuation_scaling)[0]
+
+     x, y, z = coords[2].cpu().numpy(), coords[1].cpu().numpy(), coords[0].cpu().numpy()
+     grid = pv.StructuredGrid(x, y, z)
+     grid['vectors'] = deformations.flatten(start_dim=1).T.cpu().numpy() * deformation_scaling
+     warped = grid.warp_by_vector()
+
+     pl = pv.Plotter()
+     pl.add_mesh(warped, scalars=field.cpu().numpy().ravel(), clim=[vmin, vmax], label="stress norm", cmap="jet",
+                 lighting=True, diffuse=0.2, specular=1.0, ambient=0.6, scalar_bar_args={"vertical": True})
+     pl.screenshot(filename=file, window_size=figsize)
+     pl.show()
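
Finally, a minimal sketch for plot_ms (the random two-phase image is a placeholder):

# Hypothetical example: render a random binary microstructure.
import torch
from unocg.utils.plotting import plot_ms

image = (torch.rand(128, 128) > 0.5).float()  # two-phase indicator field
plot_ms(image, show_axis=True, show_cbar=True)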
{unocg-0.0.2 → unocg-0.0.4/unocg.egg-info}/PKG-INFO
Identical changes to the PKG-INFO diff shown above: the version is bumped to 0.0.4 and jupyterlab~=4.5.3 moves from the "dev" extra into the core requirements.
{unocg-0.0.2 → unocg-0.0.4}/unocg.egg-info/SOURCES.txt
@@ -28,6 +28,15 @@ unocg/problems/thermal.py
  unocg/solvers/__init__.py
  unocg/solvers/base.py
  unocg/solvers/torch.py
+ unocg/training/__init__.py
+ unocg/training/losses/__init__.py
+ unocg/training/losses/base.py
+ unocg/training/losses/mechanical.py
+ unocg/training/losses/thermal.py
  unocg/transforms/__init__.py
  unocg/transforms/base.py
- unocg/transforms/fourier.py
+ unocg/transforms/fourier.py
+ unocg/utils/__init__.py
+ unocg/utils/data.py
+ unocg/utils/evaluation.py
+ unocg/utils/plotting.py
Files listed above with +0 -0: renamed from unocg-0.0.2 to unocg-0.0.4 without content changes.
{unocg-0.0.2 → unocg-0.0.4}/unocg.egg-info/requires.txt
@@ -6,13 +6,13 @@ torch==2.10.0
  tqdm~=4.67.1
  torchvision~=0.25.0
  pyvista[jupyter]~=0.46.5
+ jupyterlab~=4.5.3

  [all]
  unocg[dev]

  [dev]
  jupytext~=1.16.4
- jupyterlab~=4.5.3
  pre_commit~=3.8.0
  sphinx~=7.4.7
  sphinx-rtd-theme~=2.0.0