llg3d 2.0.0__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. llg3d/__init__.py +3 -3
  2. llg3d/__main__.py +2 -2
  3. llg3d/benchmarks/__init__.py +1 -0
  4. llg3d/benchmarks/compare_commits.py +321 -0
  5. llg3d/benchmarks/efficiency.py +451 -0
  6. llg3d/benchmarks/utils.py +25 -0
  7. llg3d/element.py +118 -31
  8. llg3d/grid.py +51 -64
  9. llg3d/io.py +395 -0
  10. llg3d/main.py +36 -38
  11. llg3d/parameters.py +159 -49
  12. llg3d/post/__init__.py +1 -1
  13. llg3d/post/extract.py +105 -0
  14. llg3d/post/info.py +178 -0
  15. llg3d/post/m1_vs_T.py +90 -0
  16. llg3d/post/m1_vs_time.py +56 -0
  17. llg3d/post/process.py +82 -75
  18. llg3d/post/utils.py +38 -0
  19. llg3d/post/x_profiles.py +141 -0
  20. llg3d/py.typed +1 -0
  21. llg3d/solvers/__init__.py +153 -0
  22. llg3d/solvers/base.py +345 -0
  23. llg3d/solvers/experimental/__init__.py +9 -0
  24. llg3d/solvers/experimental/jax.py +361 -0
  25. llg3d/solvers/math_utils.py +41 -0
  26. llg3d/solvers/mpi.py +370 -0
  27. llg3d/solvers/numpy.py +126 -0
  28. llg3d/solvers/opencl.py +439 -0
  29. llg3d/solvers/profiling.py +38 -0
  30. {llg3d-2.0.0.dist-info → llg3d-3.0.0.dist-info}/METADATA +6 -3
  31. llg3d-3.0.0.dist-info/RECORD +36 -0
  32. {llg3d-2.0.0.dist-info → llg3d-3.0.0.dist-info}/WHEEL +1 -1
  33. llg3d-3.0.0.dist-info/entry_points.txt +9 -0
  34. llg3d/output.py +0 -108
  35. llg3d/post/plot_results.py +0 -65
  36. llg3d/post/temperature.py +0 -83
  37. llg3d/simulation.py +0 -104
  38. llg3d/solver/__init__.py +0 -45
  39. llg3d/solver/jax.py +0 -383
  40. llg3d/solver/mpi.py +0 -449
  41. llg3d/solver/numpy.py +0 -210
  42. llg3d/solver/opencl.py +0 -329
  43. llg3d/solver/solver.py +0 -93
  44. llg3d-2.0.0.dist-info/RECORD +0 -25
  45. llg3d-2.0.0.dist-info/entry_points.txt +0 -4
  46. {llg3d-2.0.0.dist-info → llg3d-3.0.0.dist-info}/licenses/AUTHORS +0 -0
  47. {llg3d-2.0.0.dist-info → llg3d-3.0.0.dist-info}/licenses/LICENSE +0 -0
  48. {llg3d-2.0.0.dist-info → llg3d-3.0.0.dist-info}/top_level.txt +0 -0
llg3d/solvers/mpi.py ADDED
@@ -0,0 +1,370 @@
1
+ """
2
+ LLG3D solver using MPI.
3
+
4
+ The parallelization is done in the x direction.
5
+ """
6
+
7
+ import argparse
8
+ import sys
9
+ import time
10
+
11
+ import mpi4py
12
+ import numpy as np
13
+
14
+ from .. import solvers
15
+ from .base import BaseSolver
16
+ from .math_utils import cross_product
17
+ from .profiling import timeit
18
+
19
+ mpi4py.rc.initialize = False
20
+ mpi4py.rc.finalize = True
21
+
22
+ from mpi4py import MPI # noqa: E402
23
+
24
+ comm: MPI.Comm
25
+ rank: int
26
+ size: int
27
+ status: MPI.Status
28
+
29
+ Boundaries = tuple[np.ndarray, np.ndarray, MPI.Request | None, MPI.Request | None]
30
+ Boundaries_3d = tuple[Boundaries, Boundaries, Boundaries]
31
+
32
+
def initialize_mpi():
    """Start MPI (if needed) and publish the communicator globals.

    Idempotent: ``MPI.Init`` is only called when MPI is not yet running.
    The resulting handles are stored both in this module and on the
    ``solvers`` package so other modules can share the same communicator.
    """
    if not MPI.Is_initialized():
        MPI.Init()

    global comm, rank, size, status
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    status = MPI.Status()

    # Mirror the handles on the solvers package for shared access,
    # and flag that MPI has been set up.
    for attr, value in (
        ("comm", comm),
        ("rank", rank),
        ("size", size),
        ("status", status),
        ("mpi_initialized", True),
    ):
        setattr(solvers, attr, value)
50
+
51
+
52
+ class MPISolver(BaseSolver):
53
+ """MPI LLG3D solver."""
54
+
55
+ solver_type = "mpi"
56
+
57
+ def __post_init__(self):
58
+ """Initialize MPI and check parameters."""
59
+ initialize_mpi()
60
+ super().__post_init__()
61
+ if self.Jx % size != 0:
62
+ raise ValueError(
63
+ f"Jx ({self.Jx}) must be divisible by the number of processes ({size})"
64
+ )
65
+
66
+ def _get_boundaries_x(self, m: np.ndarray) -> Boundaries:
67
+ """
68
+ Returns the boundaries asynchronously.
69
+
70
+ Allows overlapping communication time of boundaries with calculations
71
+ if non-blocking communication is used.
72
+
73
+ Args:
74
+ m: Magnetization array (shape (3, nx, ny, nz))
75
+
76
+ Returns:
77
+ - m_i_x_start: Start boundary in x direction
78
+ - m_i_x_end: End boundary in x direction
79
+ - request_start: Request for start boundary or None
80
+ - request_end: Request for end boundary or None
81
+ """
82
+ # Extract slices for Neumann boundary conditions
83
+
84
+ # Create boundary buffers with the same dtype as `m` to avoid
85
+ # MPI packing/unpacking issues when precision is `single`.
86
+ m_i_x_start = np.empty((1, self.grid.Jy, self.grid.Jz), dtype=self.np_float)
87
+ m_i_x_end = np.empty_like(m_i_x_start)
88
+
89
+ # Prepare ring communication:
90
+ # Even if procs 0 and size - 1 shouldn't receive anything from left
91
+ # and right respectively, it's simpler to express it like this
92
+ right = (rank + 1) % size
93
+ left = (rank - 1 + size) % size
94
+
95
+ if self.blocking:
96
+ # Wait for boundaries to be available
97
+ comm.Sendrecv(
98
+ m[:1, :, :], dest=left, sendtag=0, recvbuf=m_i_x_end, source=right
99
+ )
100
+ comm.Sendrecv(
101
+ m[-1:, :, :], dest=right, sendtag=1, recvbuf=m_i_x_start, source=left
102
+ )
103
+ return m_i_x_start, m_i_x_end, None, None
104
+ else:
105
+ request_start = comm.Irecv(m_i_x_start, source=left, tag=201)
106
+ request_end = comm.Irecv(m_i_x_end, source=right, tag=202)
107
+ comm.Isend(m[-1:, :, :], dest=right, tag=201)
108
+ comm.Isend(m[:1, :, :], dest=left, tag=202)
109
+
110
+ return m_i_x_start, m_i_x_end, request_start, request_end
111
+
112
+ @timeit
113
+ def _get_boundaries_3d(self, m: np.ndarray) -> Boundaries_3d:
114
+ """
115
+ Returns the boundaries for all components.
116
+
117
+ Args:
118
+ m: Magnetization array (shape (3, nx, ny, nz))
119
+
120
+ Returns:
121
+ Boundaries for all three components
122
+ """
123
+ return (
124
+ self._get_boundaries_x(m[0]),
125
+ self._get_boundaries_x(m[1]),
126
+ self._get_boundaries_x(m[2]),
127
+ )
128
+
129
+ def laplacian_3d(
130
+ self,
131
+ m_i: np.ndarray,
132
+ m_i_x_start: np.ndarray,
133
+ m_i_x_end: np.ndarray,
134
+ request_start: MPI.Request | None,
135
+ request_end: MPI.Request | None,
136
+ ) -> np.ndarray:
137
+ """
138
+ Returns the Laplacian of m_i in 3D.
139
+
140
+ We start by calculating contributions in y and z, to wait
141
+ for the end of communications in x.
142
+
143
+ Args:
144
+ m_i: i-component of the magnetization array (shape (nx, ny, nz))
145
+ m_i_x_start: start boundary in x direction
146
+ m_i_x_end: end boundary in x direction
147
+ request_start: request for start boundary
148
+ request_end: request for end boundary
149
+
150
+ Returns:
151
+ Laplacian of m_i (shape (nx, ny, nz))
152
+ """
153
+ # Extract slices for Neumann boundary conditions
154
+ m_i_y_start = m_i[:, 1:2, :]
155
+ m_i_y_end = m_i[:, -2:-1, :]
156
+
157
+ m_i_z_start = m_i[:, :, 1:2]
158
+ m_i_z_end = m_i[:, :, -2:-1]
159
+
160
+ m_i_y_plus = np.concatenate((m_i[:, 1:, :], m_i_y_end), axis=1)
161
+ m_i_y_minus = np.concatenate((m_i_y_start, m_i[:, :-1, :]), axis=1)
162
+ m_i_z_plus = np.concatenate((m_i[:, :, 1:], m_i_z_end), axis=2)
163
+ m_i_z_minus = np.concatenate((m_i_z_start, m_i[:, :, :-1]), axis=2)
164
+
165
+ if self.grid.uniform: # Uniform grid spacing: limit truncature errors
166
+ laplacian = self.grid.inv_dx2 * (
167
+ m_i_y_plus + m_i_y_minus + m_i_z_plus + m_i_z_minus - 6 * m_i
168
+ )
169
+ else:
170
+ laplacian = (
171
+ self.grid.inv_dy2 * (m_i_y_plus + m_i_y_minus)
172
+ + self.grid.inv_dz2 * (m_i_z_plus + m_i_z_minus)
173
+ + self.grid.center_coeff * m_i
174
+ )
175
+
176
+ # Wait for x-boundaries to be available (communications completed)
177
+ if request_start is not None: # non blocking communication
178
+ request_start.Wait(status)
179
+ if request_end is not None: # non blocking communication
180
+ request_end.Wait(status)
181
+
182
+ # For extreme procs, apply Neumann boundary conditions in x
183
+ if rank == size - 1:
184
+ m_i_x_end = m_i[-2:-1, :, :]
185
+ if rank == 0:
186
+ m_i_x_start = m_i[1:2, :, :]
187
+
188
+ m_i_x_plus = np.concatenate((m_i[1:, :, :], m_i_x_end), axis=0)
189
+ m_i_x_minus = np.concatenate((m_i_x_start, m_i[:-1, :, :]), axis=0)
190
+
191
+ laplacian += self.grid.inv_dx2 * (m_i_x_plus + m_i_x_minus)
192
+
193
+ return laplacian
194
+
195
+ @timeit
196
+ def compute_laplacian(self, m: np.ndarray, boundaries: Boundaries_3d) -> np.ndarray:
197
+ """
198
+ Compute the laplacian of m in 3D.
199
+
200
+ Args:
201
+ m: Magnetization array (shape (3, nx, ny, nz))
202
+ boundaries: Boundaries for x direction
203
+
204
+ Returns:
205
+ Laplacian of m (shape (3, nx, ny, nz))
206
+ """
207
+ return np.stack(
208
+ [
209
+ self.laplacian_3d(m[0], *boundaries[0]),
210
+ self.laplacian_3d(m[1], *boundaries[1]),
211
+ self.laplacian_3d(m[2], *boundaries[2]),
212
+ ],
213
+ axis=0,
214
+ )
215
+
216
+ @timeit
217
+ def compute_slope(
218
+ self,
219
+ m: np.ndarray,
220
+ R_random: np.ndarray,
221
+ H_aniso: np.ndarray,
222
+ boundaries: Boundaries_3d,
223
+ ) -> np.ndarray:
224
+ """
225
+ Compute the slope of the LLG equation.
226
+
227
+ Args:
228
+ m: Magnetization array (shape (3, nx, ny, nz))
229
+ R_random: Random field array (shape (3, nx, ny, nz))
230
+ H_aniso: Pre-allocated buffer for anisotropy field.
231
+ boundaries: Boundaries for x direction
232
+
233
+ Returns:
234
+ Slope array (shape (3, nx, ny, nz))
235
+ """
236
+ # Precalculate terms used multiple times
237
+
238
+ self.elem.compute_H_anisotropy(m, H_aniso)
239
+
240
+ laplacian_m = self.compute_laplacian(m, boundaries)
241
+ R_eff = self.elem.coeff_1 * laplacian_m + R_random + H_aniso
242
+ R_eff[0] += self.elem.coeff_3
243
+
244
+ m_cross_R_eff = cross_product(m, R_eff)
245
+ m_cross_m_cross_R_eff = cross_product(m, m_cross_R_eff)
246
+
247
+ s = -(m_cross_R_eff + self.elem.lambda_G * m_cross_m_cross_R_eff)
248
+
249
+ return s
250
+
251
+ @timeit
252
+ def _xyz_average(self, m: np.ndarray) -> float:
253
+ """
254
+ Returns the spatial average of m with shape (g.dims) using the midpoint method.
255
+
256
+ Performs the local sum on each process and then reduces it to process 0.
257
+
258
+ Args:
259
+ m: Array to be integrated (shape (x, y, z))
260
+
261
+ Returns:
262
+ Spatial average of m
263
+ """
264
+ # Make a copy of m to avoid modifying its value
265
+ mm = m.copy()
266
+
267
+ # On y and z edges, divide the contribution by 2
268
+ mm[:, 0, :] /= 2
269
+ mm[:, -1, :] /= 2
270
+ mm[:, :, 0] /= 2
271
+ mm[:, :, -1] /= 2
272
+
273
+ # On x edges (only on extreme procs), divide the contribution by 2
274
+ if rank == 0:
275
+ mm[0] /= 2
276
+ if rank == size - 1:
277
+ mm[-1] /= 2
278
+ local_sum = mm.sum()
279
+
280
+ # Sum across all processes gathered by process 0
281
+ global_sum = comm.reduce(local_sum)
282
+
283
+ # Spatial average is the global sum divided by the number of cells
284
+ return float(global_sum) / self.grid.ncell if global_sum is not None else 0.0
285
+
286
+ @timeit
287
+ def _yz_average(self, m_i: np.ndarray) -> np.ndarray:
288
+ """
289
+ Compute the integral of m_i over y and z directions.
290
+
291
+ Gather the local profiles from all processes to form the global profile.
292
+
293
+ Args:
294
+ m_i: Magnetization component array (shape (nx, ny, nz))
295
+
296
+ Returns:
297
+ 1D array of the integral over y and z (shape (nx,))
298
+ """
299
+ local_yz_average = super()._yz_average(m_i)
300
+ # global coordinates
301
+ global_yz_average = np.empty(self.grid.Jx, dtype=self.np_float)
302
+ comm.Gather(local_yz_average, global_yz_average)
303
+ return global_yz_average
304
+
305
+ def _simulate(self) -> float:
306
+ """
307
+ Simulates the system for N iterations.
308
+
309
+ Returns:
310
+ The time taken for the simulation
311
+ """
312
+ m_n = self._init_m_n()
313
+ H_aniso = np.empty_like(m_n)
314
+ t = 0.0
315
+ self._record(m_n, t, 0) # Record the initial solution
316
+
317
+ start_time = time.perf_counter()
318
+
319
+ for n in self._progress_bar():
320
+ t += self.dt
321
+
322
+ # Prediction phase
323
+ x_boundaries: Boundaries_3d = self._get_boundaries_3d(m_n)
324
+
325
+ # Adding randomness: effect of temperature
326
+ R_random = self._get_R_random()
327
+
328
+ s_pre = self.compute_slope(m_n, R_random, H_aniso, x_boundaries)
329
+ m_pre = m_n + self.dt * s_pre
330
+
331
+ # Correction phase
332
+ x_boundaries = self._get_boundaries_3d(m_pre)
333
+
334
+ s_cor = self.compute_slope(m_pre, R_random, H_aniso, x_boundaries)
335
+ m_n += self.dt * 0.5 * (s_pre + s_cor)
336
+
337
+ # renormalize to verify the constraint of being on the sphere
338
+ self._normalize(m_n)
339
+
340
+ self._record(m_n, t, n)
341
+
342
+ total_time = time.perf_counter() - start_time
343
+
344
+ self._finalize()
345
+
346
+ return total_time
class ArgumentParser(argparse.ArgumentParser):
    """An argument parser compatible with MPI.

    Only rank 0 prints messages, and exit is synchronized across ranks so
    that MPI is finalized cleanly instead of each process aborting alone.
    """

    def _print_message(self, message, file=None):
        """Write *message* to *file* (default stderr) on rank 0 only."""
        if rank == 0 and message:
            if file is None:
                file = sys.stderr
            file.write(message)

    def exit(self, status: int = 0, message: str | None = None):
        """
        Exit the program using MPI finalize.

        Args:
            status: Exit status code
            message: Optional exit message
        """
        if message:
            self._print_message(message, sys.stderr)
        # Make sure every rank reaches finalization together.
        comm.barrier()
        # Guard against finalizing MPI twice (or before it was started).
        if MPI.Is_initialized() and not MPI.Is_finalized():
            MPI.Finalize()
        # Use sys.exit, not the site-module `exit` helper: the latter is
        # meant for interactive sessions and is absent under `python -S`.
        sys.exit(status)
llg3d/solvers/numpy.py ADDED
@@ -0,0 +1,126 @@
1
+ """LLG3D solver using NumPy."""
2
+
3
+ from typing import ClassVar
4
+ import time
5
+
6
+ import numpy as np
7
+
8
+ from .base import BaseSolver, timeit
9
+ from .math_utils import cross_product
10
+
11
+
class NumpySolver(BaseSolver):
    """NumPy-based (single-process) LLG3D solver."""

    solver_type: ClassVar[str] = "numpy"  #: Solver type name

    @timeit
    def laplacian_3d(self, m_i: np.ndarray) -> np.ndarray:
        """
        Returns the laplacian of m_i in 3D with Neumann boundary conditions.

        Args:
            m_i: Magnetization direction (shape (nx, ny, nz))

        Returns:
            Laplacian of m_i (shape (nx, ny, nz))
        """
        # "reflect" padding mirrors the first interior cell (excluding the
        # edge itself), which provides the ghost values for homogeneous
        # Neumann (zero normal derivative) boundary conditions.
        m_i_padded = np.pad(m_i, ((1, 1), (1, 1), (1, 1)), mode="reflect")

        # 7-point stencil: per-axis neighbour sums plus the center term.
        # NOTE(review): center_coeff presumably equals
        # -2*(inv_dx2 + inv_dy2 + inv_dz2) — defined in grid.py.
        laplacian = (
            self.grid.inv_dx2
            * (m_i_padded[2:, 1:-1, 1:-1] + m_i_padded[:-2, 1:-1, 1:-1])
            + self.grid.inv_dy2
            * (m_i_padded[1:-1, 2:, 1:-1] + m_i_padded[1:-1, :-2, 1:-1])
            + self.grid.inv_dz2
            * (m_i_padded[1:-1, 1:-1, 2:] + m_i_padded[1:-1, 1:-1, :-2])
            + self.grid.center_coeff * m_i
        )
        return laplacian

    @timeit
    def compute_laplacian(self, m: np.ndarray) -> np.ndarray:
        """
        Compute the laplacian of m in 3D with Neumann boundary conditions,
        one component at a time.

        Args:
            m: Magnetization array (shape (3, nx, ny, nz))

        Returns:
            Laplacian of m (shape (3, nx, ny, nz))
        """
        return np.stack(
            [
                self.laplacian_3d(m[0]),
                self.laplacian_3d(m[1]),
                self.laplacian_3d(m[2]),
            ],
            axis=0,
        )

    @timeit
    def _compute_slope(
        self, m_n: np.ndarray, R_random: np.ndarray, H_aniso: np.ndarray
    ) -> np.ndarray:
        """
        Compute the slope (right-hand side) of the LLG equation.

        Args:
            m_n: Magnetization array at time step n (shape (3, nx, ny, nz)).
            R_random: Random field array (shape (3, nx, ny, nz)).
            H_aniso: Pre-allocated buffer for anisotropy field, overwritten
                in place by ``compute_H_anisotropy``.

        Returns:
            Slope array (shape (3, nx, ny, nz))
        """
        self.elem.compute_H_anisotropy(m_n, H_aniso)

        # Effective field: exchange (laplacian), thermal noise, anisotropy,
        # plus a constant applied-field term on the x component.
        laplacian_m = self.compute_laplacian(m_n)
        R_eff = self.elem.coeff_1 * laplacian_m + R_random + H_aniso
        R_eff[0] += self.elem.coeff_3

        # LLG: -(m x H_eff + lambda_G * m x (m x H_eff))
        m_cross_R_eff = cross_product(m_n, R_eff)
        m_cross_m_cross_R_eff = cross_product(m_n, m_cross_R_eff)

        s = -(m_cross_R_eff + self.elem.lambda_G * m_cross_m_cross_R_eff)

        return s

    def _simulate(self) -> float:
        """
        Simulates the system for N iterations (Heun predictor-corrector).

        Returns:
            The wall-clock time taken for the simulation loop
        """
        m_n = self._init_m_n()
        # Reusable buffer for the anisotropy field (avoids per-step allocation)
        H_aniso = np.empty_like(m_n)
        t = 0.0
        self._record(m_n, t, 0)  # Record the initial solution

        start_time = time.perf_counter()

        for n in self._progress_bar():
            t += self.dt

            # Adding randomness: temperature effect.
            # The same noise realization is reused in the correction phase.
            R_random = self._get_R_random()

            # Prediction phase
            s_pre = self._compute_slope(m_n, R_random, H_aniso)
            m_pre = m_n + self.dt * s_pre

            # Correction phase
            s_cor = self._compute_slope(m_pre, R_random, H_aniso)
            m_n += self.dt * 0.5 * (s_pre + s_cor)

            # We renormalize to verify the constraint of being on the sphere
            self._normalize(m_n)

            self._record(m_n, t, n)

        total_time = time.perf_counter() - start_time

        self._finalize()

        return total_time