evoxels 0.1.1__tar.gz → 0.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {evoxels-0.1.1 → evoxels-0.1.2}/PKG-INFO +1 -1
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/inversion.py +1 -1
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/precompiled_solvers/allen_cahn.py +1 -1
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/precompiled_solvers/cahn_hilliard.py +1 -1
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/problem_definition.py +29 -29
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/profiler.py +27 -10
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/solvers.py +2 -1
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/timesteppers.py +6 -6
- evoxels-0.1.2/evoxels/utils.py +439 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels.egg-info/PKG-INFO +1 -1
- {evoxels-0.1.1 → evoxels-0.1.2}/pyproject.toml +1 -1
- {evoxels-0.1.1 → evoxels-0.1.2}/tests/test_rhs.py +1 -1
- {evoxels-0.1.1 → evoxels-0.1.2}/tests/test_solvers.py +1 -1
- evoxels-0.1.1/evoxels/utils.py +0 -124
- {evoxels-0.1.1 → evoxels-0.1.2}/LICENSE +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/README.md +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/__init__.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/boundary_conditions.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/fd_stencils.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/function_approximators.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/precompiled_solvers/__init__.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/voxelfields.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels/voxelgrid.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels.egg-info/SOURCES.txt +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels.egg-info/dependency_links.txt +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels.egg-info/requires.txt +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/evoxels.egg-info/top_level.txt +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/setup.cfg +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/tests/test_fields.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/tests/test_inversion.py +0 -0
- {evoxels-0.1.1 → evoxels-0.1.2}/tests/test_laplace.py +0 -0
evoxels/inversion.py
@@ -72,7 +72,7 @@ class InversionModel:
         solver = PseudoSpectralIMEX_dfx(problem.fourier_symbol)

         solution = dfx.diffeqsolve(
-            dfx.ODETerm(lambda t, y, args: problem.rhs(
+            dfx.ODETerm(lambda t, y, args: problem.rhs(t, y)),
             solver,
             t0=saveat.subs.ts[0],
             t1=saveat.subs.ts[-1],
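The one-line change above aligns `problem.rhs` with the `(t, y)` order that diffrax's `ODETerm` passes to its vector field, so the lambda no longer has to reshuffle arguments. A minimal sketch of that calling convention; the solver choice, save points and toy right-hand side are illustrative, not taken from evoxels:

    import diffrax as dfx
    import jax.numpy as jnp

    def rhs(t, y):
        # toy right-hand side using the new (t, u) argument order
        return -y

    solution = dfx.diffeqsolve(
        dfx.ODETerm(lambda t, y, args: rhs(t, y)),
        dfx.Tsit5(),                      # illustrative explicit solver
        t0=0.0, t1=1.0, dt0=0.01,
        y0=jnp.ones(8),
        saveat=dfx.SaveAt(ts=jnp.linspace(0.0, 1.0, 5)),
    )
    print(solution.ys.shape)              # (5, 8)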
evoxels/precompiled_solvers/allen_cahn.py
@@ -23,7 +23,7 @@ def run_allen_cahn_solver(
     plot_bounds = None,
 ):
     """
-
+    Solves time-dependent Allen-Cahn problem with ForwardEuler timestepper.
     """
     solver = TimeDependentSolver(
         voxelfields,
evoxels/precompiled_solvers/cahn_hilliard.py
@@ -20,7 +20,7 @@ def run_cahn_hilliard_solver(
     plot_bounds = None,
 ):
     """
-
+    Solves time-dependent Cahn-Hilliard problem with PseudoSpectralIMEX timestepper.
    """
     solver = TimeDependentSolver(
         voxelfields,
evoxels/problem_definition.py
@@ -13,17 +13,17 @@ _i_ = slice(1, -1)  # inner elements [1:-1]
 class ODE(ABC):
     @property
     @abstractmethod
-    def order(self):
+    def order(self) -> int:
         """Spatial order of convergence for numerical right-hand side."""
         pass

     @abstractmethod
-    def rhs_analytic(self,
+    def rhs_analytic(self, t, u):
         """Sympy expression of the problem right-hand side.

         Args:
-            u : Sympy function of current state.
             t (float): Current time.
+            u : Sympy function of current state.

         Returns:
             Sympy function of problem right-hand side.
@@ -31,12 +31,12 @@ class ODE(ABC):
         pass

     @abstractmethod
-    def rhs(self,
+    def rhs(self, t, u):
         """Numerical right-hand side of the ODE system.

         Args:
-            u (array): Current state.
             t (float): Current time.
+            u (array): Current state.

         Returns:
             Same type as ``u`` containing the time derivative.
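Taken together, these hunks flip every right-hand-side signature from `(u, t)` to `(t, u)`. A minimal sketch of a concrete subclass under the new convention; the class name and the constant decay rate are invented for illustration, only the `order`/`rhs_analytic`/`rhs` interface comes from the ABC above:

    from evoxels.problem_definition import ODE

    class ExponentialDecay(ODE):
        """Toy du/dt = -k*u, used only to illustrate the (t, u) ordering."""

        def __init__(self, k=1.0):
            self.k = k

        @property
        def order(self) -> int:
            return 2  # formal spatial order; this toy has no spatial operator

        def rhs_analytic(self, t, u):
            # sympy expression of the right-hand side
            return -self.k * u

        def rhs(self, t, u):
            # numerical right-hand side, same array type as u
            return -self.k * u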
@@ -122,7 +122,7 @@ class ReactionDiffusion(SemiLinearODE):
         k_squared = self.vg.fft_k_squared_nonperiodic()

         self._fourier_symbol = -self.D * self.A * k_squared
-
+
     @property
     def order(self):
         return 2
@@ -130,14 +130,14 @@ class ReactionDiffusion(SemiLinearODE):
     @property
     def fourier_symbol(self):
         return self._fourier_symbol
-
-    def _eval_f(self,
+
+    def _eval_f(self, t, c, lib):
         """Evaluate source/forcing term using ``self.f``."""
         try:
-            return self.f(
+            return self.f(t, c, lib)
         except TypeError:
-            return self.f(
-
+            return self.f(t, c)
+
     @property
     def bc_type(self):
         return self.BC_type
@@ -145,12 +145,12 @@ class ReactionDiffusion(SemiLinearODE):
     def pad_bc(self, u):
         return self.pad_boundary(u, self.bcs[0], self.bcs[1])

-    def rhs_analytic(self,
-        return self.D*spv.laplacian(u) + self._eval_f(
-
-    def rhs(self,
+    def rhs_analytic(self, t, u):
+        return self.D*spv.laplacian(u) + self._eval_f(t, u, sp)
+
+    def rhs(self, t, u):
         laplace = self.vg.laplace(self.pad_bc(u))
-        update = self.D * laplace + self._eval_f(
+        update = self.D * laplace + self._eval_f(t, u, self.vg.lib)
         return update

 @dataclass
@@ -185,15 +185,15 @@ class ReactionDiffusionSBM(ReactionDiffusion, SmoothedBoundaryODE):
     def pad_bc(self, u):
         return self.pad_boundary(u, self.bcs[0], self.bcs[1])

-    def rhs_analytic(self,
-
-
+    def rhs_analytic(self, t, u, mask):
+        grad_m = spv.gradient(mask)
+        norm_grad_m = sp.sqrt(grad_m.dot(grad_m))

-        divergence = spv.divergence(self.D*(
-        du = divergence +
+        divergence = spv.divergence(self.D*(spv.gradient(u) - u/mask*grad_m))
+        du = divergence + norm_grad_m*self.bc_flux + mask*self._eval_f(t, u/mask, sp)
         return du

-    def rhs(self,
+    def rhs(self, t, u):
         z = self.pad_bc(u)
         divergence = self.vg.grad_x_face(self.vg.grad_x_face(z) -\
             self.vg.to_x_face(z/self.mask) * self.vg.grad_x_face(self.mask)
@@ -207,7 +207,7 @@ class ReactionDiffusionSBM(ReactionDiffusion, SmoothedBoundaryODE):

         update = self.D * divergence + \
             self.norm*self.bc_flux + \
-            self.mask[:,1:-1,1:-1,1:-1]*self._eval_f(u/self.mask[:,1:-1,1:-1,1:-1],
+            self.mask[:,1:-1,1:-1,1:-1]*self._eval_f(t, u/self.mask[:,1:-1,1:-1,1:-1], self.vg.lib)
         return update


@@ -249,13 +249,13 @@ class PeriodicCahnHilliard(SemiLinearODE):
         except TypeError:
             return self.mu_hom(c)

-    def rhs_analytic(self,
+    def rhs_analytic(self, t, c):
         mu = self._eval_mu(c, sp) - 2*self.eps*spv.laplacian(c)
         fluxes = self.D*c*(1-c)*spv.gradient(mu)
         rhs = spv.divergence(fluxes)
         return rhs

-    def rhs(self,
+    def rhs(self, t, c):
         r"""Evaluate :math:`\partial c / \partial t` for the CH equation.

         Numerical computation of
@@ -341,7 +341,7 @@ class AllenCahnEquation(SemiLinearODE):
         except TypeError:
             return self.potential(phi)

-    def rhs_analytic(self,
+    def rhs_analytic(self, t, phi):
         grad = spv.gradient(phi)
         laplace = spv.laplacian(phi)
         norm_grad = sp.sqrt(grad.dot(grad))
@@ -354,7 +354,7 @@ class AllenCahnEquation(SemiLinearODE):
             + 3/self.eps * phi * (1-phi) * self.force
         return self.M * df_dphi

-    def rhs(self,
+    def rhs(self, t, phi):
         r"""Two-phase Allen-Cahn equation

         Microstructural evolution of the order parameter ``\phi``
@@ -422,13 +422,13 @@ class CoupledReactionDiffusion(SemiLinearODE):
         except TypeError:
             return self.interaction(u)

-    def rhs_analytic(self,
+    def rhs_analytic(self, t, u):
         interaction = self._eval_interaction(u, sp)
         dc_A = self.D_A*spv.laplacian(u[0]) - interaction + self.feed * (1-u[0])
         dc_B = self.D_B*spv.laplacian(u[1]) + interaction - self.kill * u[1]
         return (dc_A, dc_B)

-    def rhs(self,
+    def rhs(self, t, u):
         r"""Two-component reaction-diffusion system

         Use batch channels for multiple species:
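`_eval_f` now forwards the current time and, when the callable accepts it, the array library, so user-supplied forcing terms can be written in either form. A hedged sketch of the two call signatures the try/except above supports; the forcing expressions themselves are invented for illustration:

    # three-argument form: lib is the array module (numpy, torch, jax.numpy)
    def forcing_with_lib(t, c, lib):
        return lib.exp(-t) * (1.0 - c)

    # simpler two-argument form; _eval_f falls back to this on TypeError
    def forcing_plain(t, c):
        return 0.1 * (1.0 - c)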
evoxels/profiler.py
@@ -3,12 +3,20 @@ import psutil
 import os
 import subprocess
 import tracemalloc
+import shutil
 from abc import ABC, abstractmethod

 class MemoryProfiler(ABC):
     """Base interface for tracking host and device memory usage."""
+    def __init__(self):
+        self.max_used_cpu = 0.0
+        self.max_used_gpu = 0.0
+        self.track_gpu = False  # subclasses set this
+
     def get_cuda_memory_from_nvidia_smi(self):
         """Return currently used CUDA memory in megabytes."""
+        if shutil.which("nvidia-smi") is None:
+            return None
         try:
             output = subprocess.check_output(
                 ['nvidia-smi', '--query-gpu=memory.used',
@@ -23,8 +31,10 @@ class MemoryProfiler(ABC):
         process = psutil.Process(os.getpid())
         used_cpu = process.memory_info().rss / 1024**2
         self.max_used_cpu = np.max((self.max_used_cpu, used_cpu))
-
-
+        if self.track_gpu:
+            used = self.get_cuda_memory_from_nvidia_smi()
+            if used is not None:
+                self.max_used_gpu = np.max((self.max_used_gpu, used))

     @abstractmethod
     def print_memory_stats(self, start: float, end: float, iters: int):
@@ -35,13 +45,14 @@ class TorchMemoryProfiler(MemoryProfiler):
     def __init__(self, device):
         """Initialize the profiler for a given torch device."""
         import torch
+        super().__init__()
         self.torch = torch
         self.device = device
+        self.track_gpu = (device.type == 'cuda')
+
         tracemalloc.start()
-        if
+        if self.track_gpu:
             torch.cuda.reset_peak_memory_stats(device=device)
-        self.max_used_gpu = 0
-        self.max_used_cpu = 0

     def print_memory_stats(self, start, end, iters):
         """Print usage statistics for the Torch backend."""
@@ -60,7 +71,10 @@ class TorchMemoryProfiler(MemoryProfiler):
         elif self.device.type == 'cuda':
             self.update_memory_stats()
             used = self.get_cuda_memory_from_nvidia_smi()
-
+            if used is None:
+                print("GPU-RAM (nvidia-smi) unavailable.")
+            else:
+                print(f"GPU-RAM (nvidia-smi) current: {used} MB ({self.max_used_gpu} MB max)")
             print(f"GPU-RAM (torch) current: "
                   f"{self.torch.cuda.memory_allocated(self.device) / 1024**2:.2f} MB "
                   f"({self.torch.cuda.max_memory_allocated(self.device) / 1024**2:.2f} MB max, "
@@ -70,9 +84,9 @@ class JAXMemoryProfiler(MemoryProfiler):
     def __init__(self):
         """Initialize the profiler for JAX."""
         import jax
+        super().__init__()
         self.jax = jax
-        self.
-        self.max_used_cpu = 0
+        self.track_gpu = any(d.platform == "gpu" for d in jax.devices())
         tracemalloc.start()

     def print_memory_stats(self, start, end, iters):
@@ -88,7 +102,10 @@ class JAXMemoryProfiler(MemoryProfiler):
         current = process.memory_info().rss / 1024**2
         print(f"CPU-RAM (psutil) current: {current:.2f} MB ({self.max_used_cpu:.2f} MB max)")

-        if self.
+        if self.track_gpu:
             self.update_memory_stats()
             used = self.get_cuda_memory_from_nvidia_smi()
-
+            if used is None:
+                print("GPU-RAM (nvidia-smi) unavailable.")
+            else:
+                print(f"GPU-RAM (nvidia-smi) current: {used} MB ({self.max_used_gpu} MB max)")
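The guard added to `get_cuda_memory_from_nvidia_smi` stops the profiler from shelling out to a binary that may not exist on CPU-only machines. A standalone sketch of the same pattern; the output-format flags and the single-GPU parsing are assumptions for illustration, only the `--query-gpu=memory.used` query comes from the diff above:

    import shutil
    import subprocess

    def cuda_memory_used_mb():
        """Return used CUDA memory in MB, or None when nvidia-smi is unavailable."""
        if shutil.which("nvidia-smi") is None:
            return None
        try:
            out = subprocess.check_output(
                ["nvidia-smi", "--query-gpu=memory.used",
                 "--format=csv,nounits,noheader"],
                encoding="utf-8",
            )
            return float(out.strip().splitlines()[0])
        except (subprocess.CalledProcessError, OSError, ValueError):
            return None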
evoxels/solvers.py
@@ -99,7 +99,7 @@ class TimeDependentSolver:
             self._handle_outputs(u, frame, time, slice_idx, vtk_out, verbose, plot_bounds, colormap)
             frame += 1

-            u = step(
+            u = step(time, u)

         end = timer()
         time = max_iters * time_increment
@@ -129,6 +129,7 @@ class TimeDependentSolver:
             filename = self.problem_cls.__name__ + "_" +\
                 self.fieldnames[0] + f"_{frame:03d}.vtk"
             self.vf.export_to_vtk(filename=filename, field_names=self.fieldnames)
+
         if verbose == 'plot':
             clear_output(wait=True)
             self.vf.plot_slice(self.fieldnames[0], slice_idx, time=time, colormap=colormap, value_bounds=plot_bounds)
evoxels/timesteppers.py
@@ -16,13 +16,13 @@ class TimeStepper(ABC):
         pass

     @abstractmethod
-    def step(self,
+    def step(self, t: float, u: State) -> State:
         """
         Take one timestep from t to (t+dt).

         Args:
-            u : Current state
             t : Current time
+            u : Current state
         Returns:
             Updated state at t + dt.
         """
@@ -39,8 +39,8 @@ class ForwardEuler(TimeStepper):
     def order(self) -> int:
         return 1

-    def step(self,
-        return u + self.dt * self.problem.rhs(
+    def step(self, t: float, u: State) -> State:
+        return u + self.dt * self.problem.rhs(t, u)


 @dataclass
@@ -68,8 +68,8 @@ class PseudoSpectralIMEX(TimeStepper):
     def order(self) -> int:
         return 1

-    def step(self,
-        dc = self.pad(self.problem.rhs(
+    def step(self, t: float, u: State) -> State:
+        dc = self.pad(self.problem.rhs(t, u))
         dc_fft = self._fft_prefac * self.problem.vg.rfftn(dc, dc.shape)
         update = self.problem.vg.irfftn(dc_fft, dc.shape)[:,:u.shape[1]]
         return u + update
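With the steppers now taking `(t, u)`, a hand-rolled loop mirrors what `TimeDependentSolver` does internally with `u = step(time, u)`. A minimal sketch; the stand-in problem object, state shape and step count are illustrative, only the `ForwardEuler(problem, dt)` construction and `step(t, u)` call follow the code above:

    import numpy as np
    from types import SimpleNamespace
    from evoxels.timesteppers import ForwardEuler

    # stand-in problem exposing only the rhs(t, u) interface the stepper uses
    problem = SimpleNamespace(rhs=lambda t, u: -u)
    u = np.ones((1, 8, 8, 8))
    dt, t = 1e-3, 0.0

    stepper = ForwardEuler(problem, dt)
    for _ in range(100):
        u = stepper.step(t, u)   # new (t, u) argument order
        t += dt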
evoxels-0.1.2/evoxels/utils.py
NEW FILE (+439 lines)

import numpy as np
import sympy as sp
import sympy.vector as spv
import evoxels as evo
from evoxels.problem_definition import SmoothedBoundaryODE
from evoxels.solvers import TimeDependentSolver
import contextlib
import io
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (needed for 3D projection)
from matplotlib.patches import Patch

### Generalized test case
def rhs_convergence_test(
    ODE_class,
    problem_kwargs,
    test_function,
    mask_function = None,
    convention = "cell_center",
    dtype = "float32",
    powers = np.array([3,4,5,6,7]),
    backend = "torch"
):
    """Evaluate spatial order of an ODE right-hand side.

    ``test_function`` can be a single sympy expression or a list of
    expressions representing multiple variables. The returned error and
    slope arrays have one entry for each provided function.

    Args:
        ODE_class: ODE class with callable rhs(t, u).
        problem_kwargs: problem-specific parameters to instantiate ODE.
        test_function: single sympy expression or a list of expressions.
        mask_function: static mask for smoothed boundary method.
        convention: grid convention.
        dtype: floate precision (``float32`` or ``float64``).
        powers: refine grid in powers of two (i.e. ``Nx = 2**p``).
        backend: use ``torch`` or ``jax`` for testing.
    """
    # Verify mask_function only used with SmoothedBoundaryODE
    if mask_function is not None and not issubclass(ODE_class, SmoothedBoundaryODE):
        raise TypeError(
            f"Mask function provided but {ODE_class.__name__} "
            "is not a SmoothedBoundaryODE."
        )
    CS = spv.CoordSys3D('CS')

    # Prepare lambdified mask if needed
    # Assumed to be static i.e. no function of t
    mask = (
        sp.lambdify((CS.x, CS.y, CS.z), mask_function, "numpy")
        if mask_function is not None
        else None
    )

    if isinstance(test_function, (list, tuple)):
        test_functions = list(test_function)
    else:
        test_functions = [test_function]
    n_funcs = len(test_functions)

    # Multiply test functions with mask for SBM testing
    if mask is not None:
        temp_list = []
        for func in test_functions:
            temp_list.append(func*mask_function)
        test_functions = temp_list

    dx = np.zeros(len(powers))
    errors = np.zeros((n_funcs, len(powers)))

    for i, p in enumerate(powers):
        if convention == 'cell_center':
            vf = evo.VoxelFields((2**p, 2**p, 2**p), (1, 1, 1), convention=convention)
        elif convention == 'staggered_x':
            vf = evo.VoxelFields((2**p + 1, 2**p, 2**p), (1, 1, 1), convention=convention)
        vf.precision = dtype
        dx[i] = vf.spacing[0]

        if backend == 'torch':
            vg = evo.voxelgrid.VoxelGridTorch(vf.grid_info(), precision=vf.precision, device='cpu')
        elif backend == 'jax':
            vg = evo.voxelgrid.VoxelGridJax(vf.grid_info(), precision=vf.precision)

        # Init mask if smoothed boundary ODE
        numpy_grid = vf.meshgrid()
        if mask is not None:
            problem_kwargs["mask"] = mask(*numpy_grid)

        # Initialise fields
        u_list = []
        for func in test_functions:
            init_fun = sp.lambdify((CS.x, CS.y, CS.z), func, "numpy")
            init_data = init_fun(*numpy_grid)
            u_list.append(vg.init_scalar_field(init_data))

        u = vg.concatenate(u_list, 0)
        u = vg.bc.trim_boundary_nodes(u)

        ODE = ODE_class(vg, **problem_kwargs)
        rhs_numeric = ODE.rhs(0, u)

        if n_funcs > 1 and mask is not None:
            rhs_analytic = ODE.rhs_analytic(0, test_functions, mask_function)
        elif n_funcs > 1 and mask is None:
            rhs_analytic = ODE.rhs_analytic(0, test_functions)
        elif n_funcs == 1 and mask is not None:
            rhs_analytic = [ODE.rhs_analytic(0, test_functions[0], mask_function)]
        else:
            rhs_analytic = [ODE.rhs_analytic(0, test_functions[0])]

        # Compute solutions
        for j, func in enumerate(test_functions):
            comp = vg.export_scalar_field_to_numpy(rhs_numeric[j:j+1])
            exact_fun = sp.lambdify((CS.x, CS.y, CS.z), rhs_analytic[j], "numpy")
            exact = exact_fun(*numpy_grid)
            if convention == "staggered_x":
                exact = exact[1:-1, :, :]

            # Error norm
            diff = comp - exact
            errors[j, i] = np.linalg.norm(diff) / np.linalg.norm(exact)

    # Fit slope after loop
    slopes = np.array(
        [np.polyfit(np.log(dx), np.log(err), 1)[0] for err in errors]
    )
    if slopes.size == 1:
        slopes = slopes[0]
    order = ODE.order

    return dx, errors if errors.shape[0] > 1 else errors[0], slopes, order


def mms_convergence_test(
    ODE_class,       # an ODE class with callable rhs(field, t)->torch.Tensor (shape [x,y,z])
    problem_kwargs,  # problem parameters to instantiate ODE
    test_function,   # exact init_fun(x,y,z)->np.ndarray
    mask_function=None,
    timestepper_cls=None,
    convention="cell_center",
    dtype="float32",
    mode = 'temporal',
    g_powers = np.array([3,4,5,6,7]),
    t_powers = np.array([3,4,5,6,7]),
    t_final = 1,
    backend = "jax",
    device = 'cpu'
):
    """Evaluate temporal and spatial order of ODE solution.

    ``test_function`` can be a single sympy expression or a list of
    expressions representing multiple variables. The returned error and
    slope arrays have one entry for each provided function.

    Args:
        ODE_class: ODE class with callable rhs(t, u).
        problem_kwargs: problem-specific parameters to instantiate ODE.
        test_function: single sympy expression or a list of expressions.
        mask_function: static mask for smoothed boundary method.
        timestepper_cls: timestepper class with callable step(t, u).
        convention: grid convention.
        dtype: floate precision (``float32`` or ``float64``).
        mode: Use ``temporal`` or ``spatial`` to construct MMS forcing.
        g_powers: refine grid in powers of two (i.e. ``Nx = 2**p``).
        t_powers: refine time increment in powers of two (i.e. ``dt = 2**p``).
        t_final: End time for evaluation. Should be order of L^2/D.
        backend: use ``torch`` or ``jax`` for testing.
        device: use ``cpu`` or ``cuda`` for testing in torch.
    """
    # Verify mask_function only used with SmoothedBoundaryODE
    if mask_function is not None and not issubclass(ODE_class, SmoothedBoundaryODE):
        raise TypeError(
            f"Mask function provided but {ODE_class.__name__} "
            "is not a SmoothedBoundaryODE."
        )
    CS = spv.CoordSys3D('CS')
    t = sp.symbols('t', real=True)

    # Prepare lambdified mask if needed
    # Assumed to be static i.e. no function of t
    mask = (
        sp.lambdify((CS.x, CS.y, CS.z), mask_function, "numpy")
        if mask_function is not None
        else None
    )

    if isinstance(test_function, (list, tuple)):
        test_functions = list(test_function)
    else:
        test_functions = [test_function]
    n_funcs = len(test_functions)

    # Multiply test functions with mask for SBM testing
    if mask is not None:
        temp_list = []
        for func in test_functions:
            temp_list.append(func*mask_function)
        test_functions = temp_list

    if mode == 'temporal':
        u_list = [sp.lambdify((t, CS.x, CS.y, CS.z),
                              sp.N(func), backend) \
                  for func in test_functions]
    u_t_list = [sp.lambdify((t, CS.x, CS.y, CS.z),
                            sp.N(sp.diff(func, t)), backend) \
                for func in test_functions]

    dx = np.zeros(len(g_powers))
    dt = np.zeros(len(t_powers))
    errors = np.zeros((n_funcs, len(t_powers), len(g_powers)))

    for i, p in enumerate(g_powers):
        if convention == 'cell_center':
            vf = evo.VoxelFields((2**p, 2**p, 2**p), (1, 1, 1), convention=convention)
        elif convention == 'staggered_x':
            vf = evo.VoxelFields((2**p + 1, 2**p, 2**p), (1, 1, 1), convention=convention)
        vf.precision = dtype
        dx[i] = vf.spacing[0]

        if backend == 'torch':
            vg = evo.voxelgrid.VoxelGridTorch(vf.grid_info(), precision=vf.precision, device=device)
        elif backend == 'jax':
            vg = evo.voxelgrid.VoxelGridJax(vf.grid_info(), precision=vf.precision)

        # Init mask if smoothed boundary ODE
        numpy_grid = vf.meshgrid()
        if mask is not None:
            problem_kwargs["mask"] = mask(*numpy_grid)

        ODE = ODE_class(vg, **problem_kwargs)
        rhs_orig = ODE.rhs
        grid = vg.meshgrid()

        # Construct new rhs including forcing term from MMS
        if mode == 'temporal':
            def mms_rhs(t, u):
                """Manufactured solution rhs
                with numerical evaluation of rhs in forcing, i.e.
                forcing = du/dt_exact(t,grid) - rhs_num(t, u_exact(t,grid))
                """
                rhs = rhs_orig(t, u)
                t_ = vg.to_backend(t)
                u_ex_list = []
                for j, func in enumerate(test_functions):
                    u_ex_list.append(vg.expand_dim(u_list[j](t_, *grid), 0))
                    rhs = vg.set(rhs, j, rhs[j] + u_t_list[j](t_, *grid))
                u_ex = vg.concatenate(u_ex_list, 0)
                u_ex = vg.bc.trim_boundary_nodes(u_ex)
                rhs -= rhs_orig(t, u_ex)
                return rhs

        elif mode == 'spatial':
            if n_funcs > 1 and mask is not None:
                rhs_func = ODE.rhs_analytic(t, test_functions, mask_function)
                rhs_analytic = [sp.lambdify((t, CS.x, CS.y, CS.z), sp.N(func), backend) for func in rhs_func]
            elif n_funcs > 1 and mask is None:
                rhs_func = ODE.rhs_analytic(t, test_functions)
                rhs_analytic = [sp.lambdify((t, CS.x, CS.y, CS.z), sp.N(func), backend) for func in rhs_func]
            elif n_funcs == 1 and mask is not None:
                rhs_func = ODE.rhs_analytic(t, test_functions[0], mask_function)
                rhs_analytic = [sp.lambdify((t, CS.x, CS.y, CS.z), sp.N(rhs_func), backend)]
            else:
                rhs_func = ODE.rhs_analytic(t, test_functions[0])
                rhs_analytic = [sp.lambdify((t, CS.x, CS.y, CS.z), sp.N(rhs_func), backend)]

            def mms_rhs(t, u):
                """Manufactured solution rhs
                with analytical evaluation of rhs in forcing, i.e.
                forcing = du/dt_exact(t,grid) - rhs_exact(t, grid)
                """
                rhs = rhs_orig(t, u)
                t_ = vg.to_backend(t)
                for j, func in enumerate(test_functions):
                    rhs = vg.set(rhs, j, rhs[j] - rhs_analytic[j](t_, *grid))
                    rhs = vg.set(rhs, j, rhs[j] + u_t_list[j](t_, *grid))
                return rhs
        else:
            raise ValueError("Mode must be 'temporal' or 'spatial'.")

        # Over-write original rhs with contructed mms_rhs
        ODE.rhs = mms_rhs

        # Loop over time refinements
        for k, q in enumerate(t_powers):
            # Initialise fields
            field_names = []
            for j, func in enumerate(test_functions):
                fun = sp.lambdify((t, CS.x, CS.y, CS.z), func, "numpy")
                init_data = fun(0, *numpy_grid)
                final_data = fun(t_final, *numpy_grid)
                vf.add_field(f'u{j}', init_data)
                vf.add_field(f'u{j}_final', final_data)
                field_names.append(f'u{j}')

            # Init time increment and step function
            dt[k] = t_final / 2**q
            timestepper = timestepper_cls(ODE, dt[k])
            step = timestepper.step

            # Init solver
            solver = TimeDependentSolver(
                vf, field_names,
                backend, device=device,
                step_fn=step
            )

            # Wrap solve to capture NaN exit
            nan_hit = False
            buf = io.StringIO()
            with contextlib.redirect_stdout(buf):
                try:
                    solver.solve(dt[k], 8, int(2**q), problem_kwargs, verbose=False)
                except SystemExit:
                    nan_hit = True

            if nan_hit:
                errors[:, k, i] = np.nan
                continue

            # Compute relative L2 error
            for j, func in enumerate(test_functions):
                exact = vf.fields[f'u{j}_final']
                diff = vf.fields[f'u{j}'] - exact
                errors[j, k, i] = np.linalg.norm(diff) / np.linalg.norm(exact)

    # Fit slope after loop
    def calc_slope(x, y):
        mask = np.isfinite(y)
        if mask.sum() < 2:
            return np.nan
        return np.polyfit(np.log(x[mask]), np.log(y[mask]), 1)[0]

    t_slopes = np.array([calc_slope(dt, err[:,0]) for err in errors])
    g_slopes = np.array([calc_slope(dx, err[-1,:]) for err in errors])

    results = {
        'dt': dt,
        'dx': dx,
        'error': errors if n_funcs > 1 else errors[0],
        't_slopes': t_slopes if n_funcs > 1 else t_slopes[0],
        'g_slopes': g_slopes if n_funcs > 1 else g_slopes[0],
        'n_funcs': n_funcs,
        't_order': timestepper.order,
        'g_order': ODE.order,
    }
    return results


def plot_error_surface(series, log_axes=(True, True, True), z_max=0, title=None, alpha=0.4):
    """
    Plot one or more 3D surfaces z(x, y) with semi-transparent tiles and solid mesh lines.

    Parameters
    ----------
    series : tuple[list] of dict
        Each dict must have:
        - 'dt': 1D array-like of dt-values (length Nx)
        - 'dx': 1D array-like of dx-values (length Ny)
        - 'error': 2D array-like of values Z(X, Y) with shape (Nx, Ny)
        - 'name': (optional) label for legend
    log_axes : tuple(bool, bool, bool)
        (log_x, log_y, log_z): apply log10 to respective axis data when True.
        For Z, nonpositive values are masked to NaN before log10.
    title : str or None
        Plot title.
    alpha : float
        Face transparency for surfaces.
    """
    if not isinstance(series, (list, tuple)) or len(series) == 0:
        raise ValueError("`series` must be a non-empty tuple/list of dictionaries.")

    log_x, log_y, log_z = log_axes

    # Distinct colors
    base_colors = ['tab:red', 'tab:blue', 'tab:green', 'tab:gray',
                   'tab:purple', 'tab:brown', 'tab:pink', 'tab:orange',
                   'tab:olive', 'tab:cyan']

    fig = plt.figure(figsize=(5, 5))
    ax = fig.add_subplot(111, projection='3d')
    legend_patches = []

    count = 0
    for i, s in enumerate(series):
        if not isinstance(s, dict) or not all(k in s for k in ('dt', 'dx', 'error')):
            raise ValueError(f"Item {i} must be a dict with keys 'dt', 'dx', 'error' (and optional 'name').")

        x_in = np.asarray(s['dt'])
        y_in = np.asarray(s['dx'])
        Z = np.asarray(s['error'])
        Z = np.expand_dims(Z, axis=0) if Z.ndim == 2 else Z
        name = s.get('name', f'[{i}]')

        # Handle (1D,1D,2D) or (2D,2D,2D)
        if x_in.ndim == 1 and y_in.ndim == 1:
            X, Y = np.meshgrid(x_in, y_in, indexing='ij')  # (Nx, Ny)
        else:
            raise ValueError(f"Item {i}: dt and dx must both be 1D grids.")

        if Z.shape[1:] != X.shape:
            raise ValueError(f"Item {i}: z.shape {Z.shape} must match x/y grid shape {X.shape}.")

        # Apply log scaling
        Xp = np.log10(X) if log_x else X
        Yp = np.log10(Y) if log_y else Y
        if log_z:
            Z = np.where(Z > 0, Z, np.nan)
            Zp = np.log10(Z)
        else:
            Zp = Z

        for j in range(s['n_funcs']):
            color = base_colors[count % len(base_colors)]
            ax.plot_surface(
                Xp, Yp, Zp[j],
                color=color,        # uniform color per surface
                alpha=alpha,        # semi-transparent tiles
                edgecolor=color,    # solid mesh lines
                linewidth=0.6,
                antialiased=True,
                shade=False
            )
            label = name + f"_u{j}" if j > 0 else name
            legend_patches.append(Patch(facecolor=color, edgecolor=color, alpha=alpha, label=label))
            count += 1

    # Axis labels reflect log choice
    ax.set_xlabel('log10(dt)' if log_x else 'dt')
    ax.set_ylabel('log10(dx)' if log_y else 'dx')
    ax.text2D(0.0, 0.8, 'log10(error)' if log_z else 'error',
              transform=ax.transAxes, va="top", ha="left")
    ax.set_zlim(top=z_max)
    ax.set_title(title or 'Error Surfaces')
    ax.view_init(elev=25., azim=-145, roll=0)

    ax.legend(handles=legend_patches, loc='best')
    fig.tight_layout()
    plt.show()
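A hedged end-to-end sketch of how these new helpers are meant to be combined. The manufactured solution, the ReactionDiffusion keyword set and the import paths are assumptions chosen for illustration; the call signatures themselves are the ones defined above, and a real invocation appears in tests/test_rhs.py further down:

    import numpy as np
    import sympy as sp
    import sympy.vector as spv
    from evoxels.problem_definition import ReactionDiffusion
    from evoxels.timesteppers import ForwardEuler
    from evoxels.utils import rhs_convergence_test, mms_convergence_test, plot_error_surface

    CS = spv.CoordSys3D('CS')
    t = sp.symbols('t', real=True)
    # smooth manufactured solution; expression and problem parameters are illustrative
    u_exact = sp.exp(-t) * sp.sin(2*sp.pi*CS.x) * sp.cos(2*sp.pi*CS.y)
    kwargs = {"D": 1.0, "BC_type": 'dirichlet', "bcs": (0, 1)}   # assumed parameter set

    # spatial order of the discrete right-hand side
    dx, err, slope, order = rhs_convergence_test(
        ReactionDiffusion, kwargs, u_exact.subs(t, 0), powers=np.array([3, 4, 5])
    )

    # combined space/time study via the method of manufactured solutions
    results = mms_convergence_test(
        ReactionDiffusion, kwargs, u_exact,
        timestepper_cls=ForwardEuler, mode='temporal',
        g_powers=np.array([3, 4, 5]), t_powers=np.array([6, 8, 10]),
    )
    results['name'] = 'ReactionDiffusion'
    plot_error_surface([results], title='MMS convergence')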
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "evoxels"
-version = "0.1.1"
+version = "0.1.2"
 description = "Differentiable physics framework for voxel-based microstructure simulations"
 authors = [
     { name = "Simon Daubner", email = "s.daubner@imperial.ac.uk" }
tests/test_rhs.py
@@ -57,7 +57,7 @@ mask_fun = 0.5 + 0.3*sp.cos(4*sp.pi*CS.x) * sp.cos(2*sp.pi*CS.y)
 def test_reaction_diffusion_smoothed_boundary_rhs():
     _, _, slope, order = rhs_convergence_test(
         ODE_class = ReactionDiffusionSBM,
-        problem_kwargs = {"D": 1.0, "BC_type": 'dirichlet', "bcs": (0,1),},
+        problem_kwargs = {"D": 1.0, "BC_type": 'dirichlet', "bcs": (0,1), "bc_flux": 1},
         test_function = test_fun_sbm,
         mask_function = mask_fun,
         convention = 'staggered_x',
tests/test_solvers.py
@@ -14,7 +14,7 @@ def test_time_solver_multiple_fields():
     vf.add_field("a", np.ones(vf.shape))
     vf.add_field("b", np.zeros(vf.shape))

-    def step(
+    def step(t, u):
         return u + 1

     solver = TimeDependentSolver(vf, ["a", "b"], backend="torch", step_fn=step, device="cpu")

evoxels-0.1.1/evoxels/utils.py
DELETED
(124 lines removed; the previous content is shown below)

import numpy as np
import sympy as sp
import sympy.vector as spv
import evoxels as evo
from evoxels.problem_definition import SmoothedBoundaryODE

### Generalized test case
def rhs_convergence_test(
    ODE_class,       # an ODE class with callable rhs(field, t)->torch.Tensor (shape [x,y,z])
    problem_kwargs,  # problem parameters to instantiate ODE
    test_function,   # exact init_fun(x,y,z)->np.ndarray
    mask_function=None,
    convention="cell_center",
    dtype="float32",
    powers = np.array([3,4,5,6,7]),
    backend = "torch"
):
    """Evaluate spatial order of an ODE right-hand side.

    ``test_function`` can be a single sympy expression or a list of
    expressions representing multiple variables. The returned error and
    slope arrays have one entry for each provided function.

    Args:
        ODE_class: an ODE class with callable rhs(field, t).
        problem_kwargs: problem-specific parameters to instantiate ODE.
        test_function: single sympy expression or a list of expressions.
        mask_function: static mask for smoothed boundary method.
        convention: grid convention.
        dtype: floate precision (``float32`` or ``float64``).
        powers: refine grid in powers of two (i.e. ``Nx = 2**p``).
        backend: use ``torch`` or ``jax`` for testing.
    """
    # Verify mask_function only used with SmoothedBoundaryODE
    if mask_function is not None and not issubclass(ODE_class, SmoothedBoundaryODE):
        raise TypeError(
            f"Mask function provided but {ODE_class.__name__} "
            "is not a SmoothedBoundaryODE."
        )
    CS = spv.CoordSys3D('CS')
    # Prepare lambdified mask if needed
    mask = (
        sp.lambdify((CS.x, CS.y, CS.z), mask_function, "numpy")
        if mask_function is not None
        else None
    )

    if isinstance(test_function, (list, tuple)):
        test_functions = list(test_function)
    else:
        test_functions = [test_function]
    n_funcs = len(test_functions)

    # Multiply test functions with mask for SBM testing
    if mask is not None:
        temp_list = []
        for func in test_functions:
            temp_list.append(func*mask_function)
        test_functions = temp_list

    dx = np.zeros(len(powers))
    errors = np.zeros((n_funcs, len(powers)))

    for i, p in enumerate(powers):
        if convention == 'cell_center':
            vf = evo.VoxelFields((2**p, 2**p, 2**p), (1, 1, 1), convention=convention)
        elif convention == 'staggered_x':
            vf = evo.VoxelFields((2**p + 1, 2**p, 2**p), (1, 1, 1), convention=convention)
        vf.precision = dtype
        grid = vf.meshgrid()
        if backend == 'torch':
            vg = evo.voxelgrid.VoxelGridTorch(vf.grid_info(), precision=vf.precision, device='cpu')
        elif backend == 'jax':
            vg = evo.voxelgrid.VoxelGridJax(vf.grid_info(), precision=vf.precision)

        # Initialise fields
        u_list = []
        for func in test_functions:
            init_fun = sp.lambdify((CS.x, CS.y, CS.z), func, "numpy")
            init_data = init_fun(*grid)
            u_list.append(vg.init_scalar_field(init_data))

        u = vg.concatenate(u_list, 0)
        u = vg.bc.trim_boundary_nodes(u)

        # Init mask if smoothed boundary ODE
        if mask is not None:
            problem_kwargs["mask"] = mask(*grid)

        ODE = ODE_class(vg, **problem_kwargs)
        rhs_numeric = ODE.rhs(u, 0)

        if n_funcs > 1 and mask is not None:
            rhs_analytic = ODE.rhs_analytic(mask_function, test_functions, 0)
        elif n_funcs > 1 and mask is None:
            rhs_analytic = ODE.rhs_analytic(test_functions, 0)
        elif n_funcs == 1 and mask is not None:
            rhs_analytic = [ODE.rhs_analytic(mask_function, test_functions[0], 0)]
        else:
            rhs_analytic = [ODE.rhs_analytic(test_functions[0], 0)]

        # Compute solutions
        for j, func in enumerate(test_functions):
            comp = vg.export_scalar_field_to_numpy(rhs_numeric[j:j+1])
            exact_fun = sp.lambdify((CS.x, CS.y, CS.z), rhs_analytic[j], "numpy")
            exact = exact_fun(*grid)
            if convention == "staggered_x":
                exact = exact[1:-1, :, :]

            # Error norm
            diff = comp - exact
            errors[j, i] = np.linalg.norm(diff) / np.linalg.norm(exact)
            dx[i] = vf.spacing[0]

    # Fit slope after loop
    slopes = np.array(
        [np.polyfit(np.log(dx), np.log(err), 1)[0] for err in errors]
    )
    if slopes.size == 1:
        slopes = slopes[0]
    order = ODE.order

    return dx, errors if errors.shape[0] > 1 else errors[0], slopes, order
The remaining 17 files listed above (marked +0 -0) are unchanged between 0.1.1 and 0.1.2.